From 38cd7d1c1c9f719573607253f2b094456514e847 Mon Sep 17 00:00:00 2001 From: mohammadreza javid Date: Fri, 20 Mar 2026 13:38:26 +0330 Subject: [PATCH 1/5] update go version from 1.23 to 1.25.3 --- go.mod | 70 +- go.sum | 172 +- .../brianvoe/gofakeit/v7/BENCHMARKS.md | 667 +- .../github.com/brianvoe/gofakeit/v7/README.md | 119 +- .../brianvoe/gofakeit/v7/address.go | 103 +- .../brianvoe/gofakeit/v7/airline.go | 176 + .../github.com/brianvoe/gofakeit/v7/animal.go | 14 + vendor/github.com/brianvoe/gofakeit/v7/app.go | 6 + .../github.com/brianvoe/gofakeit/v7/auth.go | 153 +- .../github.com/brianvoe/gofakeit/v7/beer.go | 16 + .../github.com/brianvoe/gofakeit/v7/book.go | 8 + vendor/github.com/brianvoe/gofakeit/v7/car.go | 23 + .../brianvoe/gofakeit/v7/celebrity.go | 6 + .../github.com/brianvoe/gofakeit/v7/color.go | 15 +- .../brianvoe/gofakeit/v7/company.go | 132 +- vendor/github.com/brianvoe/gofakeit/v7/csv.go | 8 +- .../brianvoe/gofakeit/v7/data/address.go | 1 + .../brianvoe/gofakeit/v7/data/airline.go | 120 + .../brianvoe/gofakeit/v7/data/auth.go | 35 + .../brianvoe/gofakeit/v7/data/bank.go | 67 + .../brianvoe/gofakeit/v7/data/currency.go | 4 +- .../brianvoe/gofakeit/v7/data/data.go | 6 +- .../brianvoe/gofakeit/v7/data/emoji.go | 6563 +----- .../brianvoe/gofakeit/v7/data/hipster.go | 113 + .../brianvoe/gofakeit/v7/data/internet.go | 1 + .../brianvoe/gofakeit/v7/data/isbn.go | 58 + .../brianvoe/gofakeit/v7/data/job.go | 65 +- .../brianvoe/gofakeit/v7/data/person.go | 667 +- .../brianvoe/gofakeit/v7/data/product.go | 412 +- .../brianvoe/gofakeit/v7/data/sentence.go | 5 - .../brianvoe/gofakeit/v7/data/song.go | 246 + .../brianvoe/gofakeit/v7/data/text.go | 523 + .../gofakeit/v7/{time.go => datetime.go} | 314 +- .../github.com/brianvoe/gofakeit/v7/emoji.go | 512 +- .../github.com/brianvoe/gofakeit/v7/error.go | 78 +- .../github.com/brianvoe/gofakeit/v7/faker.go | 8 +- .../github.com/brianvoe/gofakeit/v7/file.go | 12 + .../brianvoe/gofakeit/v7/finance.go | 4 + 
.../github.com/brianvoe/gofakeit/v7/food.go | 101 +- .../github.com/brianvoe/gofakeit/v7/game.go | 25 + .../brianvoe/gofakeit/v7/generate.go | 193 +- .../github.com/brianvoe/gofakeit/v7/hacker.go | 42 + .../brianvoe/gofakeit/v7/hipster.go | 121 +- .../github.com/brianvoe/gofakeit/v7/html.go | 13 + vendor/github.com/brianvoe/gofakeit/v7/id.go | 127 + .../github.com/brianvoe/gofakeit/v7/image.go | 4 + .../brianvoe/gofakeit/v7/internet.go | 103 +- .../github.com/brianvoe/gofakeit/v7/json.go | 4 +- .../brianvoe/gofakeit/v7/languages.go | 8 + .../github.com/brianvoe/gofakeit/v7/lookup.go | 53 +- .../github.com/brianvoe/gofakeit/v7/lorem.go | 126 - .../github.com/brianvoe/gofakeit/v7/merch.png | Bin 0 -> 48523 bytes .../brianvoe/gofakeit/v7/minecraft.go | 36 + .../github.com/brianvoe/gofakeit/v7/misc.go | 53 +- .../github.com/brianvoe/gofakeit/v7/movie.go | 27 + .../github.com/brianvoe/gofakeit/v7/number.go | 313 +- .../brianvoe/gofakeit/v7/payment.go | 204 +- .../github.com/brianvoe/gofakeit/v7/person.go | 411 +- .../brianvoe/gofakeit/v7/product.go | 340 +- .../github.com/brianvoe/gofakeit/v7/school.go | 56 +- .../github.com/brianvoe/gofakeit/v7/slice.go | 6 +- .../github.com/brianvoe/gofakeit/v7/song.go | 131 + vendor/github.com/brianvoe/gofakeit/v7/sql.go | 9 +- .../github.com/brianvoe/gofakeit/v7/string.go | 59 +- .../github.com/brianvoe/gofakeit/v7/struct.go | 63 +- .../brianvoe/gofakeit/v7/template.go | 35 +- .../github.com/brianvoe/gofakeit/v7/text.go | 540 + .../brianvoe/gofakeit/v7/weighted.go | 6 + .../brianvoe/gofakeit/v7/word_adjective.go | 94 + .../brianvoe/gofakeit/v7/word_adverb.go | 49 + .../brianvoe/gofakeit/v7/word_comment.go | 72 - .../brianvoe/gofakeit/v7/word_connective.go | 14 + .../brianvoe/gofakeit/v7/word_general.go | 2 + .../brianvoe/gofakeit/v7/word_grammar.go | 34 - .../brianvoe/gofakeit/v7/word_misc.go | 32 + .../brianvoe/gofakeit/v7/word_noun.go | 67 + .../brianvoe/gofakeit/v7/word_phrase.go | 162 - 
.../brianvoe/gofakeit/v7/word_preposition.go | 8 + .../brianvoe/gofakeit/v7/word_pronoun.go | 18 + .../brianvoe/gofakeit/v7/word_sentence.go | 212 - .../brianvoe/gofakeit/v7/word_verb.go | 42 +- vendor/github.com/brianvoe/gofakeit/v7/xml.go | 13 +- vendor/github.com/cespare/xxhash/v2/README.md | 2 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 29 +- .../cespare/xxhash/v2/xxhash_asm.go | 2 +- .../cespare/xxhash/v2/xxhash_other.go | 2 +- .../cespare/xxhash/v2/xxhash_safe.go | 2 +- .../cespare/xxhash/v2/xxhash_unsafe.go | 2 +- vendor/github.com/docker/cli/AUTHORS | 20 + vendor/github.com/docker/cli/NOTICE | 2 +- .../compose/interpolation/interpolation.go | 2 +- .../cli/cli/compose/loader/full-example.yml | 5 +- .../cli/cli/compose/loader/interpolate.go | 3 +- .../docker/cli/cli/compose/loader/loader.go | 4 +- .../docker/cli/cli/compose/loader/merge.go | 2 +- .../schema/data/config_schema_v3.13.json | 680 + .../docker/cli/cli/compose/schema/schema.go | 8 +- .../cli/cli/compose/template/template.go | 4 +- .../docker/cli/cli/compose/types/types.go | 10 +- vendor/github.com/docker/cli/opts/config.go | 3 +- vendor/github.com/docker/cli/opts/envfile.go | 4 +- vendor/github.com/docker/cli/opts/file.go | 76 - vendor/github.com/docker/cli/opts/mount.go | 4 +- vendor/github.com/docker/cli/opts/network.go | 7 +- vendor/github.com/docker/cli/opts/opts.go | 8 +- vendor/github.com/docker/cli/opts/parse.go | 9 +- vendor/github.com/docker/cli/opts/port.go | 4 +- vendor/github.com/docker/cli/opts/secret.go | 5 +- .../docker/cli/opts/throttledevice.go | 2 +- vendor/github.com/docker/cli/opts/ulimit.go | 13 +- .../docker/cli/pkg/kvfile/kvfile.go | 130 + .../github.com/fsnotify/fsnotify/.cirrus.yml | 13 - .../github.com/fsnotify/fsnotify/.gitignore | 11 +- vendor/github.com/fsnotify/fsnotify/.mailmap | 2 - .../github.com/fsnotify/fsnotify/.travis.yml | 36 + vendor/github.com/fsnotify/fsnotify/AUTHORS | 52 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 318 +- 
.../fsnotify/fsnotify/CONTRIBUTING.md | 89 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +- vendor/github.com/fsnotify/fsnotify/README.md | 250 +- .../fsnotify/fsnotify/backend_fen.go | 640 - .../fsnotify/fsnotify/backend_inotify.go | 594 - .../fsnotify/fsnotify/backend_kqueue.go | 782 - .../fsnotify/fsnotify/backend_other.go | 205 - .../fsnotify/fsnotify/backend_windows.go | 827 - vendor/github.com/fsnotify/fsnotify/fen.go | 37 + .../github.com/fsnotify/fsnotify/fsnotify.go | 146 +- .../github.com/fsnotify/fsnotify/inotify.go | 337 + .../fsnotify/fsnotify/inotify_poller.go | 187 + vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 - .../{system_bsd.go => open_mode_bsd.go} | 5 +- .../{system_darwin.go => open_mode_darwin.go} | 5 +- .../github.com/fsnotify/fsnotify/windows.go | 561 + vendor/github.com/ghodss/yaml/.travis.yml | 7 - .../go-openapi/jsonpointer/.golangci.yml | 61 - .../go-openapi/jsonpointer/README.md | 8 +- .../go-openapi/jsonpointer/pointer.go | 191 +- .../go-openapi/jsonreference/.golangci.yml | 57 +- .../go-openapi/jsonreference/README.md | 14 +- vendor/github.com/go-openapi/spec/.gitignore | 3 +- .../github.com/go-openapi/spec/.golangci.yml | 21 +- vendor/github.com/go-openapi/spec/README.md | 28 +- .../github.com/go-openapi/spec/appveyor.yml | 32 + vendor/github.com/go-openapi/spec/bindata.go | 297 + vendor/github.com/go-openapi/spec/embed.go | 17 - vendor/github.com/go-openapi/spec/expander.go | 75 +- .../go-openapi/spec/normalizer_nonwindows.go | 2 +- .../github.com/go-openapi/spec/operation.go | 5 +- .../github.com/go-openapi/spec/parameter.go | 42 +- .../go-openapi/spec/schema_loader.go | 9 +- .../spec/schemas/jsonschema-draft-04.json | 149 - .../go-openapi/spec/schemas/v2/schema.json | 1607 -- vendor/github.com/go-openapi/spec/spec.go | 6 +- vendor/github.com/go-openapi/spec/swagger.go | 4 +- vendor/github.com/go-openapi/spec/url_go18.go | 8 + 
vendor/github.com/go-openapi/spec/url_go19.go | 3 + vendor/github.com/go-openapi/swag/.gitignore | 1 - .../github.com/go-openapi/swag/.golangci.yml | 54 +- .../github.com/go-openapi/swag/BENCHMARK.md | 52 - vendor/github.com/go-openapi/swag/README.md | 8 +- .../go-openapi/swag/initialism_index.go | 202 - vendor/github.com/go-openapi/swag/loading.go | 105 +- .../github.com/go-openapi/swag/name_lexem.go | 72 +- .../github.com/go-openapi/swag/post_go18.go | 24 + .../github.com/go-openapi/swag/post_go19.go | 68 + vendor/github.com/go-openapi/swag/pre_go18.go | 24 + vendor/github.com/go-openapi/swag/pre_go19.go | 70 + vendor/github.com/go-openapi/swag/split.go | 482 +- .../go-openapi/swag/string_bytes.go | 8 - vendor/github.com/go-openapi/swag/util.go | 224 +- vendor/github.com/go-openapi/swag/yaml.go | 39 +- vendor/github.com/go-sql-driver/mysql/AUTHORS | 13 + .../go-sql-driver/mysql/CHANGELOG.md | 52 + .../github.com/go-sql-driver/mysql/README.md | 18 +- .../go-sql-driver/mysql/atomic_bool.go | 19 - .../go-sql-driver/mysql/atomic_bool_go118.go | 47 - .../github.com/go-sql-driver/mysql/buffer.go | 145 +- .../go-sql-driver/mysql/collations.go | 2 +- .../go-sql-driver/mysql/compress.go | 213 + .../go-sql-driver/mysql/connection.go | 181 +- .../go-sql-driver/mysql/connector.go | 68 +- .../github.com/go-sql-driver/mysql/const.go | 7 +- vendor/github.com/go-sql-driver/mysql/dsn.go | 56 +- .../github.com/go-sql-driver/mysql/errors.go | 4 +- .../github.com/go-sql-driver/mysql/fields.go | 4 +- .../github.com/go-sql-driver/mysql/infile.go | 16 +- .../github.com/go-sql-driver/mysql/packets.go | 374 +- vendor/github.com/go-sql-driver/mysql/rows.go | 7 - .../go-sql-driver/mysql/statement.go | 13 +- .../go-sql-driver/mysql/transaction.go | 18 +- .../github.com/go-sql-driver/mysql/utils.go | 47 +- .../go-viper/mapstructure/v2/.editorconfig | 18 + .../go-viper/mapstructure/v2/.envrc | 4 + .../go-viper/mapstructure/v2/.gitignore | 6 + .../go-viper/mapstructure/v2/.golangci.yaml | 23 + 
.../go-viper/mapstructure/v2/CHANGELOG.md | 104 + .../go-viper/mapstructure/v2/LICENSE | 21 + .../go-viper/mapstructure/v2/README.md | 80 + .../go-viper/mapstructure/v2/decode_hooks.go | 609 + .../go-viper/mapstructure/v2/flake.lock | 472 + .../go-viper/mapstructure/v2/flake.nix | 39 + .../mapstructure/v2/internal/errors/errors.go | 11 + .../mapstructure/v2/internal/errors/join.go | 9 + .../v2/internal/errors/join_go1_19.go | 61 + .../go-viper/mapstructure/v2/mapstructure.go | 1593 ++ .../mapstructure/v2/reflect_go1_19.go | 44 + .../mapstructure/v2/reflect_go1_20.go | 10 + vendor/github.com/golang-jwt/jwt/.gitignore | 4 - vendor/github.com/golang-jwt/jwt/LICENSE | 9 - .../golang-jwt/jwt/MIGRATION_GUIDE.md | 22 - vendor/github.com/golang-jwt/jwt/README.md | 113 - .../golang-jwt/jwt/VERSION_HISTORY.md | 131 - vendor/github.com/golang-jwt/jwt/claims.go | 146 - vendor/github.com/golang-jwt/jwt/doc.go | 4 - vendor/github.com/golang-jwt/jwt/ecdsa.go | 142 - .../github.com/golang-jwt/jwt/ecdsa_utils.go | 69 - vendor/github.com/golang-jwt/jwt/ed25519.go | 81 - .../golang-jwt/jwt/ed25519_utils.go | 64 - vendor/github.com/golang-jwt/jwt/errors.go | 59 - vendor/github.com/golang-jwt/jwt/hmac.go | 95 - .../github.com/golang-jwt/jwt/map_claims.go | 120 - vendor/github.com/golang-jwt/jwt/none.go | 52 - vendor/github.com/golang-jwt/jwt/parser.go | 148 - vendor/github.com/golang-jwt/jwt/rsa.go | 101 - vendor/github.com/golang-jwt/jwt/rsa_pss.go | 142 - vendor/github.com/golang-jwt/jwt/rsa_utils.go | 101 - .../golang-jwt/jwt/signing_method.go | 35 - vendor/github.com/golang-jwt/jwt/token.go | 104 - vendor/github.com/golang-jwt/jwt/v4/parser.go | 77 +- .../golang-jwt/jwt/v5/MIGRATION_GUIDE.md | 16 +- vendor/github.com/golang-jwt/jwt/v5/README.md | 16 +- .../github.com/golang-jwt/jwt/v5/SECURITY.md | 4 +- vendor/github.com/golang-jwt/jwt/v5/ecdsa.go | 8 +- .../golang-jwt/jwt/v5/ecdsa_utils.go | 4 +- .../github.com/golang-jwt/jwt/v5/ed25519.go | 11 +- 
.../golang-jwt/jwt/v5/ed25519_utils.go | 4 +- vendor/github.com/golang-jwt/jwt/v5/errors.go | 40 + .../golang-jwt/jwt/v5/errors_go1_20.go | 47 - .../golang-jwt/jwt/v5/errors_go_other.go | 78 - vendor/github.com/golang-jwt/jwt/v5/hmac.go | 8 +- .../golang-jwt/jwt/v5/map_claims.go | 8 +- vendor/github.com/golang-jwt/jwt/v5/none.go | 6 +- vendor/github.com/golang-jwt/jwt/v5/parser.go | 121 +- .../golang-jwt/jwt/v5/parser_option.go | 33 +- vendor/github.com/golang-jwt/jwt/v5/rsa.go | 8 +- .../github.com/golang-jwt/jwt/v5/rsa_pss.go | 11 +- .../github.com/golang-jwt/jwt/v5/rsa_utils.go | 6 +- .../golang-jwt/jwt/v5/signing_method.go | 6 +- vendor/github.com/golang-jwt/jwt/v5/token.go | 34 +- vendor/github.com/golang-jwt/jwt/v5/types.go | 9 +- .../github.com/golang-jwt/jwt/v5/validator.go | 95 +- .../kavenegar/kavenegar-go/message_send.go | 1 + .../labstack/echo-jwt/v4/CHANGELOG.md | 32 + .../github.com/labstack/echo-jwt/v4/Makefile | 4 +- .../github.com/labstack/echo-jwt/v4/README.md | 35 +- vendor/github.com/labstack/echo-jwt/v4/jwt.go | 13 +- .../github.com/labstack/echo/v4/CHANGELOG.md | 288 + vendor/github.com/labstack/echo/v4/CLAUDE.md | 99 + vendor/github.com/labstack/echo/v4/Makefile | 7 +- vendor/github.com/labstack/echo/v4/README.md | 22 +- vendor/github.com/labstack/echo/v4/bind.go | 137 +- vendor/github.com/labstack/echo/v4/binder.go | 9 +- .../labstack/echo/v4/binder_generic.go | 573 + vendor/github.com/labstack/echo/v4/context.go | 58 +- .../labstack/echo/v4/context_generic.go | 40 + vendor/github.com/labstack/echo/v4/echo.go | 34 +- vendor/github.com/labstack/echo/v4/echo_fs.go | 2 +- vendor/github.com/labstack/echo/v4/group.go | 2 +- vendor/github.com/labstack/echo/v4/ip.go | 26 +- .../labstack/echo/v4/middleware/basic_auth.go | 29 +- .../labstack/echo/v4/middleware/body_dump.go | 12 +- .../labstack/echo/v4/middleware/body_limit.go | 6 +- .../labstack/echo/v4/middleware/compress.go | 10 +- .../echo/v4/middleware/context_timeout.go | 35 +- 
.../labstack/echo/v4/middleware/cors.go | 18 +- .../labstack/echo/v4/middleware/csrf.go | 113 +- .../labstack/echo/v4/middleware/jwt.go | 303 - .../labstack/echo/v4/middleware/logger.go | 314 +- .../echo/v4/middleware/logger_strings.go | 242 + .../labstack/echo/v4/middleware/middleware.go | 10 + .../labstack/echo/v4/middleware/proxy.go | 38 +- .../echo/v4/middleware/rate_limiter.go | 8 +- .../echo/v4/middleware/request_logger.go | 68 + .../v4/middleware/responsecontroller_1.19.go | 44 - .../v4/middleware/responsecontroller_1.20.go | 20 - .../labstack/echo/v4/middleware/static.go | 6 + .../labstack/echo/v4/middleware/timeout.go | 35 + .../labstack/echo/v4/middleware/util.go | 25 + .../github.com/labstack/echo/v4/renderer.go | 29 + .../github.com/labstack/echo/v4/response.go | 9 +- .../echo/v4/responsecontroller_1.19.go | 44 - .../echo/v4/responsecontroller_1.20.go | 20 - vendor/github.com/labstack/echo/v4/router.go | 35 +- .../mattn/go-colorable/colorable_appengine.go | 38 - .../mattn/go-colorable/colorable_others.go | 4 +- .../mattn/go-colorable/colorable_windows.go | 22 +- vendor/github.com/moby/sys/user/LICENSE | 202 + .../sys}/user/lookup_unix.go | 0 .../libcontainer => moby/sys}/user/user.go | 0 .../sys}/user/user_fuzzer.go | 0 vendor/github.com/opencontainers/runc/NOTICE | 4 +- .../libcontainer/user/lookup_deprecated.go | 81 + .../runc/libcontainer/user/user_deprecated.go | 146 + .../ory/dockertest/v3/CONTRIBUTING.md | 36 +- .../github.com/ory/dockertest/v3/SECURITY.md | 62 +- .../ory/dockertest/v3/docker/auth.go | 196 +- .../ory/dockertest/v3/docker/client.go | 122 +- .../ory/dockertest/v3/docker/image.go | 106 +- .../ory/dockertest/v3/docker/registry_auth.go | 13 + .../ory/dockertest/v3/dockertest.go | 52 +- .../github.com/redis/go-redis/v9/.gitignore | 15 +- .../redis/go-redis/v9/.golangci.yml | 36 +- .../github.com/redis/go-redis/v9/CHANGELOG.md | 124 - .../redis/go-redis/v9/CONTRIBUTING.md | 27 +- vendor/github.com/redis/go-redis/v9/Makefile | 112 +- 
vendor/github.com/redis/go-redis/v9/README.md | 454 +- .../redis/go-redis/v9/RELEASE-NOTES.md | 859 + .../redis/go-redis/v9/acl_commands.go | 81 + .../github.com/redis/go-redis/v9/adapters.go | 118 + .../github.com/redis/go-redis/v9/auth/auth.go | 61 + .../v9/auth/reauth_credentials_listener.go | 47 + .../redis/go-redis/v9/bitmap_commands.go | 64 +- .../redis/go-redis/v9/cluster_commands.go | 13 + .../github.com/redis/go-redis/v9/command.go | 2866 ++- .../go-redis/v9/command_policy_resolver.go | 209 + .../github.com/redis/go-redis/v9/commands.go | 87 +- .../redis/go-redis/v9/docker-compose.yml | 176 + vendor/github.com/redis/go-redis/v9/error.go | 275 +- .../redis/go-redis/v9/gears_commands.go | 149 - .../redis/go-redis/v9/generic_commands.go | 15 + .../redis/go-redis/v9/geo_commands.go | 10 +- .../redis/go-redis/v9/hash_commands.go | 455 +- .../redis/go-redis/v9/hotkeys_commands.go | 122 + .../conn_reauth_credentials_listener.go | 100 + .../internal/auth/streaming/cred_listeners.go | 77 + .../v9/internal/auth/streaming/manager.go | 137 + .../v9/internal/auth/streaming/pool_hook.go | 241 + .../go-redis/v9/internal/hashtag/hashtag.go | 12 + .../v9/internal/interfaces/interfaces.go | 59 + .../redis/go-redis/v9/internal/log.go | 61 +- .../go-redis/v9/internal/otel/metrics.go | 279 + .../redis/go-redis/v9/internal/pool/conn.go | 869 +- .../go-redis/v9/internal/pool/conn_check.go | 12 +- .../v9/internal/pool/conn_check_dummy.go | 15 +- .../go-redis/v9/internal/pool/conn_state.go | 343 + .../redis/go-redis/v9/internal/pool/hooks.go | 165 + .../redis/go-redis/v9/internal/pool/pool.go | 1158 +- .../go-redis/v9/internal/pool/pool_single.go | 54 +- .../go-redis/v9/internal/pool/pool_sticky.go | 13 + .../redis/go-redis/v9/internal/pool/pubsub.go | 81 + .../go-redis/v9/internal/pool/want_conn.go | 115 + .../go-redis/v9/internal/proto/reader.go | 102 +- .../v9/internal/proto/redis_errors.go | 527 + .../go-redis/v9/internal/proto/writer.go | 53 + 
.../redis/go-redis/v9/internal/redis.go | 3 + .../v9/internal/routing/aggregator.go | 1000 + .../go-redis/v9/internal/routing/policy.go | 144 + .../v9/internal/routing/shard_picker.go | 57 + .../redis/go-redis/v9/internal/semaphore.go | 193 + .../redis/go-redis/v9/internal/util.go | 67 + .../go-redis/v9/internal/util/atomic_max.go | 97 + .../go-redis/v9/internal/util/atomic_min.go | 96 + .../go-redis/v9/internal/util/convert.go | 41 + .../redis/go-redis/v9/internal/util/unsafe.go | 9 +- vendor/github.com/redis/go-redis/v9/json.go | 71 +- .../redis/go-redis/v9/list_commands.go | 8 + .../v9/maintnotifications/FEATURES.md | 235 + .../go-redis/v9/maintnotifications/README.md | 73 + .../v9/maintnotifications/circuit_breaker.go | 353 + .../go-redis/v9/maintnotifications/config.go | 457 + .../go-redis/v9/maintnotifications/errors.go | 76 + .../v9/maintnotifications/example_hooks.go | 101 + .../v9/maintnotifications/handoff_worker.go | 525 + .../go-redis/v9/maintnotifications/hooks.go | 60 + .../go-redis/v9/maintnotifications/manager.go | 362 + .../v9/maintnotifications/pool_hook.go | 182 + .../push_notification_handler.go | 524 + .../go-redis/v9/maintnotifications/state.go | 24 + .../github.com/redis/go-redis/v9/options.go | 407 +- .../redis/go-redis/v9/osscluster.go | 973 +- .../redis/go-redis/v9/osscluster_router.go | 992 + vendor/github.com/redis/go-redis/v9/otel.go | 204 + .../github.com/redis/go-redis/v9/package.json | 8 - .../github.com/redis/go-redis/v9/pipeline.go | 27 +- .../redis/go-redis/v9/probabilistic.go | 204 +- vendor/github.com/redis/go-redis/v9/pubsub.go | 129 +- .../redis/go-redis/v9/pubsub_commands.go | 14 +- .../redis/go-redis/v9/push/errors.go | 176 + .../redis/go-redis/v9/push/handler.go | 14 + .../redis/go-redis/v9/push/handler_context.go | 44 + .../redis/go-redis/v9/push/processor.go | 203 + .../github.com/redis/go-redis/v9/push/push.go | 7 + .../redis/go-redis/v9/push/registry.go | 61 + .../redis/go-redis/v9/push_notifications.go | 21 + 
vendor/github.com/redis/go-redis/v9/redis.go | 998 +- vendor/github.com/redis/go-redis/v9/result.go | 8 + vendor/github.com/redis/go-redis/v9/ring.go | 224 +- .../redis/go-redis/v9/search_builders.go | 825 + .../redis/go-redis/v9/search_commands.go | 3069 +++ .../github.com/redis/go-redis/v9/sentinel.go | 612 +- .../redis/go-redis/v9/set_commands.go | 171 +- .../redis/go-redis/v9/sortedset_commands.go | 40 +- .../redis/go-redis/v9/stream_commands.go | 179 +- .../redis/go-redis/v9/string_commands.go | 458 +- .../redis/go-redis/v9/timeseries_commands.go | 97 +- vendor/github.com/redis/go-redis/v9/tx.go | 8 +- .../github.com/redis/go-redis/v9/universal.go | 289 +- .../redis/go-redis/v9/vectorset_commands.go | 358 + .../github.com/redis/go-redis/v9/version.go | 2 +- .../rubenv/sql-migrate/.golangci.yaml | 195 +- .../github.com/rubenv/sql-migrate/migrate.go | 14 +- .../testify/assert/assertion_compare.go | 45 +- .../testify/assert/assertion_format.go | 85 +- .../testify/assert/assertion_forward.go | 170 +- .../testify/assert/assertion_order.go | 12 +- .../stretchr/testify/assert/assertions.go | 524 +- .../github.com/stretchr/testify/assert/doc.go | 4 + .../testify/assert/http_assertions.go | 4 +- .../testify/assert/yaml/yaml_custom.go | 24 + .../testify/assert/yaml/yaml_default.go | 36 + .../stretchr/testify/assert/yaml/yaml_fail.go | 17 + .../github.com/stretchr/testify/mock/mock.go | 177 +- .../stretchr/testify/require/doc.go | 2 + .../stretchr/testify/require/require.go | 532 +- .../stretchr/testify/require/require.go.tmpl | 2 +- .../testify/require/require_forward.go | 170 +- .../stretchr/testify/require/requirements.go | 2 +- .../github.com/stretchr/testify/suite/doc.go | 4 + .../stretchr/testify/suite/stats.go | 16 +- .../stretchr/testify/suite/suite.go | 112 +- vendor/github.com/sv-tools/openapi/LICENSE | 21 + .../sv-tools/openapi/spec/bool_or_schema.go | 73 + .../sv-tools/openapi/spec/callback.go | 68 + .../sv-tools/openapi/spec/components.go | 161 + 
.../sv-tools/openapi/spec/contact.go | 26 + .../sv-tools/openapi/spec/discriminator.go | 35 + .../sv-tools/openapi/spec/encoding.go | 80 + .../sv-tools/openapi/spec/example.go | 58 + .../sv-tools/openapi/spec/extensions.go | 138 + .../sv-tools/openapi/spec/external-docs.go | 24 + .../sv-tools/openapi/spec/header.go | 78 + .../github.com/sv-tools/openapi/spec/info.go | 46 + .../sv-tools/openapi/spec/json_schema.go | 257 + .../sv-tools/openapi/spec/license.go | 27 + .../github.com/sv-tools/openapi/spec/link.go | 90 + .../sv-tools/openapi/spec/media_type.go | 52 + .../sv-tools/openapi/spec/oauth-flow.go | 52 + .../sv-tools/openapi/spec/oauth-flows.go | 38 + .../sv-tools/openapi/spec/openapi.go | 60 + .../sv-tools/openapi/spec/operation.go | 103 + .../sv-tools/openapi/spec/parameter.go | 112 + .../sv-tools/openapi/spec/path_item.go | 80 + .../github.com/sv-tools/openapi/spec/paths.go | 65 + .../github.com/sv-tools/openapi/spec/ref.go | 111 + .../sv-tools/openapi/spec/request_body.go | 58 + .../sv-tools/openapi/spec/response.go | 42 + .../sv-tools/openapi/spec/responses.go | 130 + .../sv-tools/openapi/spec/schema.go | 195 + .../openapi/spec/security-requirement.go | 21 + .../sv-tools/openapi/spec/security-scheme.go | 85 + .../sv-tools/openapi/spec/server.go | 34 + .../sv-tools/openapi/spec/server_variable.go | 24 + .../sv-tools/openapi/spec/single_or_array.go | 61 + .../github.com/sv-tools/openapi/spec/tag.go | 26 + .../sv-tools/openapi/spec/type_formats.go | 94 + .../github.com/sv-tools/openapi/spec/types.go | 57 + .../github.com/sv-tools/openapi/spec/xml.go | 52 + .../github.com/swaggo/echo-swagger/README.md | 46 +- .../github.com/swaggo/echo-swagger/swagger.go | 89 +- vendor/github.com/swaggo/swag/Dockerfile | 10 +- vendor/github.com/swaggo/swag/Makefile | 13 +- vendor/github.com/swaggo/swag/README.md | 72 +- vendor/github.com/swaggo/swag/README_pt.md | 11 +- vendor/github.com/swaggo/swag/README_zh-CN.md | 11 +- vendor/github.com/swaggo/swag/const.go | 13 + 
vendor/github.com/swaggo/swag/enums.go | 5 +- vendor/github.com/swaggo/swag/field_parser.go | 72 +- vendor/github.com/swaggo/swag/formatter.go | 9 +- vendor/github.com/swaggo/swag/generics.go | 20 +- vendor/github.com/swaggo/swag/golist.go | 2 +- vendor/github.com/swaggo/swag/operation.go | 50 +- vendor/github.com/swaggo/swag/packages.go | 81 +- vendor/github.com/swaggo/swag/parser.go | 340 +- vendor/github.com/swaggo/swag/schema.go | 79 + vendor/github.com/swaggo/swag/types.go | 30 +- vendor/github.com/swaggo/swag/utils.go | 14 +- vendor/github.com/swaggo/swag/v2/.gitignore | 27 + .../github.com/swaggo/swag/v2/.goreleaser.yml | 30 + .../swaggo/swag/v2/CODE_OF_CONDUCT.md | 46 + .../github.com/swaggo/swag/v2/CONTRIBUTING.md | 16 + vendor/github.com/swaggo/swag/v2/Dockerfile | 36 + vendor/github.com/swaggo/swag/v2/Makefile | 78 + .../swaggo/swag/v2/PULL_REQUEST_TEMPLATE.md | 8 + vendor/github.com/swaggo/swag/v2/README.md | 1036 + vendor/github.com/swaggo/swag/v2/README_pt.md | 968 + .../github.com/swaggo/swag/v2/README_zh-CN.md | 771 + vendor/github.com/swaggo/swag/v2/const.go | 567 + vendor/github.com/swaggo/swag/v2/doc.go | 5 + vendor/github.com/swaggo/swag/v2/enums.go | 13 + .../github.com/swaggo/swag/v2/extensions.go | 4 + .../github.com/swaggo/swag/v2/field_parser.go | 703 + .../swaggo/swag/v2/field_parserv3.go | 596 + vendor/github.com/swaggo/swag/v2/formatter.go | 182 + vendor/github.com/swaggo/swag/v2/generics.go | 445 + .../github.com/swaggo/swag/v2/genericsv3.go | 34 + vendor/github.com/swaggo/swag/v2/golist.go | 78 + vendor/github.com/swaggo/swag/v2/license | 21 + vendor/github.com/swaggo/swag/v2/operation.go | 1266 ++ .../github.com/swaggo/swag/v2/operationv3.go | 1234 ++ vendor/github.com/swaggo/swag/v2/package.go | 197 + vendor/github.com/swaggo/swag/v2/packages.go | 652 + vendor/github.com/swaggo/swag/v2/parser.go | 2049 ++ vendor/github.com/swaggo/swag/v2/parserv3.go | 1087 + vendor/github.com/swaggo/swag/v2/schema.go | 294 + 
vendor/github.com/swaggo/swag/v2/schemav3.go | 141 + vendor/github.com/swaggo/swag/v2/spec.go | 64 + vendor/github.com/swaggo/swag/v2/swagger.go | 72 + vendor/github.com/swaggo/swag/v2/types.go | 123 + vendor/github.com/swaggo/swag/v2/typesv3.go | 10 + vendor/github.com/swaggo/swag/v2/utils.go | 81 + vendor/github.com/swaggo/swag/v2/version.go | 4 + vendor/github.com/swaggo/swag/version.go | 2 +- vendor/go.uber.org/atomic/.codecov.yml | 19 + vendor/go.uber.org/atomic/.gitignore | 15 + vendor/go.uber.org/atomic/CHANGELOG.md | 127 + vendor/go.uber.org/atomic/LICENSE.txt | 19 + vendor/go.uber.org/atomic/Makefile | 79 + vendor/go.uber.org/atomic/README.md | 63 + vendor/go.uber.org/atomic/bool.go | 88 + vendor/go.uber.org/atomic/bool_ext.go | 53 + vendor/go.uber.org/atomic/doc.go | 23 + vendor/go.uber.org/atomic/duration.go | 89 + vendor/go.uber.org/atomic/duration_ext.go | 40 + vendor/go.uber.org/atomic/error.go | 72 + vendor/go.uber.org/atomic/error_ext.go | 39 + vendor/go.uber.org/atomic/float32.go | 77 + vendor/go.uber.org/atomic/float32_ext.go | 76 + vendor/go.uber.org/atomic/float64.go | 77 + vendor/go.uber.org/atomic/float64_ext.go | 76 + vendor/go.uber.org/atomic/gen.go | 27 + vendor/go.uber.org/atomic/int32.go | 109 + vendor/go.uber.org/atomic/int64.go | 109 + vendor/go.uber.org/atomic/nocmp.go | 35 + vendor/go.uber.org/atomic/pointer_go118.go | 31 + .../atomic/pointer_go118_pre119.go | 60 + vendor/go.uber.org/atomic/pointer_go119.go | 61 + vendor/go.uber.org/atomic/string.go | 72 + vendor/go.uber.org/atomic/string_ext.go | 54 + vendor/go.uber.org/atomic/time.go | 55 + vendor/go.uber.org/atomic/time_ext.go | 36 + vendor/go.uber.org/atomic/uint32.go | 109 + vendor/go.uber.org/atomic/uint64.go | 109 + vendor/go.uber.org/atomic/uintptr.go | 109 + vendor/go.uber.org/atomic/unsafe_pointer.go | 65 + vendor/go.uber.org/atomic/value.go | 31 + vendor/golang.org/x/crypto/LICENSE | 4 +- vendor/golang.org/x/crypto/acme/acme.go | 102 +- 
.../x/crypto/acme/autocert/autocert.go | 25 +- .../x/crypto/acme/autocert/listener.go | 32 +- .../x/crypto/acme/autocert/renewal.go | 36 +- vendor/golang.org/x/crypto/acme/http.go | 32 +- vendor/golang.org/x/crypto/acme/jws.go | 2 +- vendor/golang.org/x/crypto/acme/rfc8555.go | 6 +- vendor/golang.org/x/crypto/acme/types.go | 24 +- .../golang.org/x/crypto/acme/version_go112.go | 27 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 4 +- vendor/golang.org/x/crypto/blowfish/cipher.go | 2 +- vendor/golang.org/x/mod/LICENSE | 27 + vendor/golang.org/x/mod/PATENTS | 22 + .../x/mod/internal/lazyregexp/lazyre.go | 78 + vendor/golang.org/x/mod/module/module.go | 840 + vendor/golang.org/x/mod/module/pseudo.go | 250 + vendor/golang.org/x/mod/semver/semver.go | 407 + vendor/golang.org/x/net/LICENSE | 4 +- .../x/net/http2/client_conn_pool.go | 8 +- vendor/golang.org/x/net/http2/config.go | 169 + vendor/golang.org/x/net/http2/config_go125.go | 15 + vendor/golang.org/x/net/http2/config_go126.go | 15 + vendor/golang.org/x/net/http2/frame.go | 132 +- vendor/golang.org/x/net/http2/gotrack.go | 17 +- vendor/golang.org/x/net/http2/h2c/h2c.go | 14 +- vendor/golang.org/x/net/http2/http2.go | 114 +- vendor/golang.org/x/net/http2/server.go | 423 +- vendor/golang.org/x/net/http2/testsync.go | 331 - vendor/golang.org/x/net/http2/transport.go | 1297 +- vendor/golang.org/x/net/http2/unencrypted.go | 32 + vendor/golang.org/x/net/http2/write.go | 13 +- vendor/golang.org/x/net/http2/writesched.go | 67 +- ...rity.go => writesched_priority_rfc7540.go} | 113 +- .../net/http2/writesched_priority_rfc9218.go | 209 + .../x/net/http2/writesched_roundrobin.go | 2 +- .../x/net/internal/httpcommon/ascii.go | 53 + .../httpcommon}/headermap.go | 24 +- .../x/net/internal/httpcommon/request.go | 467 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + vendor/golang.org/x/sync/errgroup/errgroup.go | 151 + vendor/golang.org/x/sys/LICENSE | 4 +- vendor/golang.org/x/sys/unix/README.md | 2 
+- .../golang.org/x/sys/unix/affinity_linux.go | 9 +- vendor/golang.org/x/sys/unix/auxv.go | 36 + .../golang.org/x/sys/unix/auxv_unsupported.go | 13 + vendor/golang.org/x/sys/unix/fdset.go | 4 +- vendor/golang.org/x/sys/unix/ifreq_linux.go | 4 +- vendor/golang.org/x/sys/unix/ioctl_linux.go | 96 + vendor/golang.org/x/sys/unix/mkall.sh | 1 + vendor/golang.org/x/sys/unix/mkerrors.sh | 26 +- vendor/golang.org/x/sys/unix/mremap.go | 5 + vendor/golang.org/x/sys/unix/syscall_aix.go | 2 +- .../golang.org/x/sys/unix/syscall_darwin.go | 154 + .../x/sys/unix/syscall_dragonfly.go | 12 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 + vendor/golang.org/x/sys/unix/syscall_linux.go | 117 +- .../x/sys/unix/syscall_linux_arm64.go | 2 + .../x/sys/unix/syscall_linux_loong64.go | 2 + .../x/sys/unix/syscall_linux_riscv64.go | 2 + .../golang.org/x/sys/unix/syscall_netbsd.go | 17 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../golang.org/x/sys/unix/syscall_solaris.go | 89 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 9 + .../x/sys/unix/syscall_zos_s390x.go | 104 +- .../golang.org/x/sys/unix/vgetrandom_linux.go | 13 + .../unix/vgetrandom_unsupported.go} | 11 +- .../x/sys/unix/zerrors_darwin_amd64.go | 12 + .../x/sys/unix/zerrors_darwin_arm64.go | 12 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 524 +- .../x/sys/unix/zerrors_linux_386.go | 35 + .../x/sys/unix/zerrors_linux_amd64.go | 35 + .../x/sys/unix/zerrors_linux_arm.go | 35 + .../x/sys/unix/zerrors_linux_arm64.go | 37 + .../x/sys/unix/zerrors_linux_loong64.go | 35 + .../x/sys/unix/zerrors_linux_mips.go | 35 + .../x/sys/unix/zerrors_linux_mips64.go | 35 + .../x/sys/unix/zerrors_linux_mips64le.go | 35 + .../x/sys/unix/zerrors_linux_mipsle.go | 35 + .../x/sys/unix/zerrors_linux_ppc.go | 35 + .../x/sys/unix/zerrors_linux_ppc64.go | 35 + .../x/sys/unix/zerrors_linux_ppc64le.go | 35 + .../x/sys/unix/zerrors_linux_riscv64.go | 35 + .../x/sys/unix/zerrors_linux_s390x.go | 35 + .../x/sys/unix/zerrors_linux_sparc64.go | 35 + 
.../x/sys/unix/zerrors_zos_s390x.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 185 + .../x/sys/unix/zsyscall_darwin_amd64.s | 45 + .../x/sys/unix/zsyscall_darwin_arm64.go | 185 + .../x/sys/unix/zsyscall_darwin_arm64.s | 45 + .../golang.org/x/sys/unix/zsyscall_linux.go | 53 +- .../x/sys/unix/zsyscall_openbsd_386.go | 24 + .../x/sys/unix/zsyscall_openbsd_386.s | 5 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 24 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 24 + .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 24 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 24 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 + .../x/sys/unix/zsyscall_solaris_amd64.go | 122 +- .../x/sys/unix/zsysnum_linux_386.go | 6 + .../x/sys/unix/zsysnum_linux_amd64.go | 7 + .../x/sys/unix/zsysnum_linux_arm.go | 6 + .../x/sys/unix/zsysnum_linux_arm64.go | 8 +- .../x/sys/unix/zsysnum_linux_loong64.go | 8 + .../x/sys/unix/zsysnum_linux_mips.go | 6 + .../x/sys/unix/zsysnum_linux_mips64.go | 6 + .../x/sys/unix/zsysnum_linux_mips64le.go | 6 + .../x/sys/unix/zsysnum_linux_mipsle.go | 6 + .../x/sys/unix/zsysnum_linux_ppc.go | 6 + .../x/sys/unix/zsysnum_linux_ppc64.go | 6 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 6 + .../x/sys/unix/zsysnum_linux_riscv64.go | 8 +- .../x/sys/unix/zsysnum_linux_s390x.go | 6 + .../x/sys/unix/zsysnum_linux_sparc64.go | 6 + .../x/sys/unix/ztypes_darwin_amd64.go | 73 + .../x/sys/unix/ztypes_darwin_arm64.go | 73 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + 
vendor/golang.org/x/sys/unix/ztypes_linux.go | 429 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 18 +- .../x/sys/unix/ztypes_linux_amd64.go | 16 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 20 +- .../x/sys/unix/ztypes_linux_arm64.go | 16 + .../x/sys/unix/ztypes_linux_loong64.go | 16 + .../x/sys/unix/ztypes_linux_mips.go | 18 +- .../x/sys/unix/ztypes_linux_mips64.go | 16 + .../x/sys/unix/ztypes_linux_mips64le.go | 16 + .../x/sys/unix/ztypes_linux_mipsle.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 20 +- .../x/sys/unix/ztypes_linux_ppc64.go | 16 + .../x/sys/unix/ztypes_linux_ppc64le.go | 16 + .../x/sys/unix/ztypes_linux_riscv64.go | 49 + .../x/sys/unix/ztypes_linux_s390x.go | 16 + .../x/sys/unix/ztypes_linux_sparc64.go | 16 + .../x/sys/unix/ztypes_netbsd_arm.go | 2 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 6 + .../golang.org/x/sys/windows/dll_windows.go | 13 +- .../x/sys/windows/security_windows.go | 73 +- .../x/sys/windows/syscall_windows.go | 75 +- .../golang.org/x/sys/windows/types_windows.go | 536 +- .../x/sys/windows/zsyscall_windows.go | 1147 +- vendor/golang.org/x/text/LICENSE | 4 +- vendor/golang.org/x/text/unicode/bidi/core.go | 11 +- vendor/golang.org/x/time/LICENSE | 4 +- vendor/golang.org/x/time/rate/rate.go | 47 +- vendor/golang.org/x/time/rate/sometimes.go | 4 +- vendor/golang.org/x/tools/LICENSE | 4 +- .../x/tools/go/ast/astutil/enclosing.go | 45 +- .../x/tools/go/ast/astutil/imports.go | 75 +- .../x/tools/go/ast/astutil/rewrite.go | 6 +- .../golang.org/x/tools/go/ast/astutil/util.go | 13 +- .../x/tools/go/buildutil/allpackages.go | 4 +- .../x/tools/go/buildutil/fakecontext.go | 4 +- .../golang.org/x/tools/go/buildutil/tags.go | 4 +- .../golang.org/x/tools/go/internal/cgo/cgo.go | 2 +- vendor/golang.org/x/tools/go/loader/doc.go | 2 +- vendor/golang.org/x/tools/go/loader/loader.go | 27 +- vendor/golang.org/x/tools/imports/forward.go | 77 + .../x/tools/internal/event/core/event.go | 80 + 
.../x/tools/internal/event/core/export.go | 70 + .../x/tools/internal/event/core/fast.go | 77 + .../golang.org/x/tools/internal/event/doc.go | 7 + .../x/tools/internal/event/event.go | 127 + .../x/tools/internal/event/keys/keys.go | 564 + .../x/tools/internal/event/keys/standard.go | 22 + .../x/tools/internal/event/keys/util.go | 21 + .../x/tools/internal/event/label/label.go | 214 + .../x/tools/internal/gocommand/invoke.go | 567 + .../internal/gocommand/invoke_notunix.go | 13 + .../x/tools/internal/gocommand/invoke_unix.go | 13 + .../x/tools/internal/gocommand/vendor.go | 163 + .../x/tools/internal/gocommand/version.go | 71 + .../x/tools/internal/gopathwalk/walk.go | 336 + .../x/tools/internal/imports/fix.go | 1896 ++ .../x/tools/internal/imports/imports.go | 359 + .../x/tools/internal/imports/mod.go | 841 + .../x/tools/internal/imports/mod_cache.go | 331 + .../x/tools/internal/imports/sortimports.go | 298 + .../x/tools/internal/imports/source.go | 63 + .../x/tools/internal/imports/source_env.go | 129 + .../tools/internal/imports/source_modindex.go | 100 + .../x/tools/internal/modindex/directories.go | 131 + .../x/tools/internal/modindex/index.go | 287 + .../x/tools/internal/modindex/lookup.go | 178 + .../x/tools/internal/modindex/modindex.go | 119 + .../x/tools/internal/modindex/symbols.go | 244 + .../x/tools/internal/stdlib/deps.go | 519 + .../x/tools/internal/stdlib/import.go | 97 + .../x/tools/internal/stdlib/manifest.go | 17759 ++++++++++++++++ .../x/tools/internal/stdlib/stdlib.go | 105 + .../x/tools/internal/versions/features.go | 43 - .../x/tools/internal/versions/gover.go | 172 - .../x/tools/internal/versions/toolchain.go | 14 - .../internal/versions/toolchain_go120.go | 14 - .../internal/versions/toolchain_go121.go | 14 - .../x/tools/internal/versions/types.go | 19 - .../x/tools/internal/versions/types_go121.go | 30 - .../x/tools/internal/versions/types_go122.go | 41 - .../x/tools/internal/versions/versions.go | 57 - vendor/modules.txt | 153 +- 
.../ghodss => sigs.k8s.io}/yaml/.gitignore | 4 + vendor/sigs.k8s.io/yaml/.travis.yml | 12 + vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 + .../ghodss => sigs.k8s.io}/yaml/LICENSE | 0 vendor/sigs.k8s.io/yaml/OWNERS | 27 + .../ghodss => sigs.k8s.io}/yaml/README.md | 16 +- vendor/sigs.k8s.io/yaml/RELEASE.md | 9 + vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 + vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 + .../ghodss => sigs.k8s.io}/yaml/fields.go | 1 + .../ghodss => sigs.k8s.io}/yaml/yaml.go | 129 +- vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 + 779 files changed, 99374 insertions(+), 23323 deletions(-) create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/airline.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/airline.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/auth.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/bank.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/isbn.go delete mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/sentence.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/song.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/data/text.go rename vendor/github.com/brianvoe/gofakeit/v7/{time.go => datetime.go} (61%) create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/id.go delete mode 100644 vendor/github.com/brianvoe/gofakeit/v7/lorem.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/merch.png create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/song.go create mode 100644 vendor/github.com/brianvoe/gofakeit/v7/text.go delete mode 100644 vendor/github.com/brianvoe/gofakeit/v7/word_comment.go delete mode 100644 vendor/github.com/brianvoe/gofakeit/v7/word_grammar.go delete mode 100644 vendor/github.com/brianvoe/gofakeit/v7/word_phrase.go delete mode 100644 vendor/github.com/brianvoe/gofakeit/v7/word_sentence.go create mode 100644 vendor/github.com/docker/cli/cli/compose/schema/data/config_schema_v3.13.json delete mode 
100644 vendor/github.com/docker/cli/opts/file.go create mode 100644 vendor/github.com/docker/cli/pkg/kvfile/kvfile.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/.cirrus.yml delete mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap create mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml create mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh rename vendor/github.com/fsnotify/fsnotify/{system_bsd.go => open_mode_bsd.go} (50%) rename vendor/github.com/fsnotify/fsnotify/{system_darwin.go => open_mode_darwin.go} (50%) create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go delete mode 100644 vendor/github.com/ghodss/yaml/.travis.yml delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.golangci.yml create mode 100644 vendor/github.com/go-openapi/spec/appveyor.yml create mode 100644 vendor/github.com/go-openapi/spec/bindata.go delete mode 100644 vendor/github.com/go-openapi/spec/embed.go delete mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json delete mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json create mode 100644 vendor/github.com/go-openapi/spec/url_go18.go delete mode 100644 vendor/github.com/go-openapi/swag/BENCHMARK.md delete mode 100644 
vendor/github.com/go-openapi/swag/initialism_index.go create mode 100644 vendor/github.com/go-openapi/swag/post_go18.go create mode 100644 vendor/github.com/go-openapi/swag/post_go19.go create mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go create mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go delete mode 100644 vendor/github.com/go-openapi/swag/string_bytes.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/atomic_bool.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go create mode 100644 vendor/github.com/go-sql-driver/mysql/compress.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.editorconfig create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.envrc create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.gitignore create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml create mode 100644 vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md create mode 100644 vendor/github.com/go-viper/mapstructure/v2/LICENSE create mode 100644 vendor/github.com/go-viper/mapstructure/v2/README.md create mode 100644 vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.lock create mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.nix create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/mapstructure.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go delete mode 100644 vendor/github.com/golang-jwt/jwt/.gitignore delete mode 100644 vendor/github.com/golang-jwt/jwt/LICENSE delete mode 100644 
vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/golang-jwt/jwt/README.md delete mode 100644 vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md delete mode 100644 vendor/github.com/golang-jwt/jwt/claims.go delete mode 100644 vendor/github.com/golang-jwt/jwt/doc.go delete mode 100644 vendor/github.com/golang-jwt/jwt/ecdsa.go delete mode 100644 vendor/github.com/golang-jwt/jwt/ecdsa_utils.go delete mode 100644 vendor/github.com/golang-jwt/jwt/ed25519.go delete mode 100644 vendor/github.com/golang-jwt/jwt/ed25519_utils.go delete mode 100644 vendor/github.com/golang-jwt/jwt/errors.go delete mode 100644 vendor/github.com/golang-jwt/jwt/hmac.go delete mode 100644 vendor/github.com/golang-jwt/jwt/map_claims.go delete mode 100644 vendor/github.com/golang-jwt/jwt/none.go delete mode 100644 vendor/github.com/golang-jwt/jwt/parser.go delete mode 100644 vendor/github.com/golang-jwt/jwt/rsa.go delete mode 100644 vendor/github.com/golang-jwt/jwt/rsa_pss.go delete mode 100644 vendor/github.com/golang-jwt/jwt/rsa_utils.go delete mode 100644 vendor/github.com/golang-jwt/jwt/signing_method.go delete mode 100644 vendor/github.com/golang-jwt/jwt/token.go delete mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go delete mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go create mode 100644 vendor/github.com/labstack/echo/v4/CLAUDE.md create mode 100644 vendor/github.com/labstack/echo/v4/binder_generic.go create mode 100644 vendor/github.com/labstack/echo/v4/context_generic.go delete mode 100644 vendor/github.com/labstack/echo/v4/middleware/jwt.go create mode 100644 vendor/github.com/labstack/echo/v4/middleware/logger_strings.go delete mode 100644 vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.19.go delete mode 100644 vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.20.go create mode 100644 vendor/github.com/labstack/echo/v4/renderer.go delete mode 100644 
vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go delete mode 100644 vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/moby/sys/user/LICENSE rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/lookup_unix.go (100%) rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/user.go (100%) rename vendor/github.com/{opencontainers/runc/libcontainer => moby/sys}/user/user_fuzzer.go (100%) create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go create mode 100644 vendor/github.com/ory/dockertest/v3/docker/registry_auth.go delete mode 100644 vendor/github.com/redis/go-redis/v9/CHANGELOG.md create mode 100644 vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md create mode 100644 vendor/github.com/redis/go-redis/v9/adapters.go create mode 100644 vendor/github.com/redis/go-redis/v9/auth/auth.go create mode 100644 vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go create mode 100644 vendor/github.com/redis/go-redis/v9/command_policy_resolver.go create mode 100644 vendor/github.com/redis/go-redis/v9/docker-compose.yml delete mode 100644 vendor/github.com/redis/go-redis/v9/gears_commands.go create mode 100644 vendor/github.com/redis/go-redis/v9/hotkeys_commands.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go create 
mode 100644 vendor/github.com/redis/go-redis/v9/internal/otel/metrics.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/redis.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/routing/aggregator.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/routing/policy.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/routing/shard_picker.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/semaphore.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/atomic_max.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/atomic_min.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/convert.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/README.md create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/config.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go create mode 100644 
vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go create mode 100644 vendor/github.com/redis/go-redis/v9/maintnotifications/state.go create mode 100644 vendor/github.com/redis/go-redis/v9/osscluster_router.go create mode 100644 vendor/github.com/redis/go-redis/v9/otel.go delete mode 100644 vendor/github.com/redis/go-redis/v9/package.json create mode 100644 vendor/github.com/redis/go-redis/v9/push/errors.go create mode 100644 vendor/github.com/redis/go-redis/v9/push/handler.go create mode 100644 vendor/github.com/redis/go-redis/v9/push/handler_context.go create mode 100644 vendor/github.com/redis/go-redis/v9/push/processor.go create mode 100644 vendor/github.com/redis/go-redis/v9/push/push.go create mode 100644 vendor/github.com/redis/go-redis/v9/push/registry.go create mode 100644 vendor/github.com/redis/go-redis/v9/push_notifications.go create mode 100644 vendor/github.com/redis/go-redis/v9/search_builders.go create mode 100644 vendor/github.com/redis/go-redis/v9/search_commands.go create mode 100644 vendor/github.com/redis/go-redis/v9/vectorset_commands.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go create mode 100644 vendor/github.com/sv-tools/openapi/LICENSE create mode 100644 vendor/github.com/sv-tools/openapi/spec/bool_or_schema.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/callback.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/components.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/contact.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/discriminator.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/encoding.go create mode 100644 
vendor/github.com/sv-tools/openapi/spec/example.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/extensions.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/external-docs.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/header.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/info.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/json_schema.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/license.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/link.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/media_type.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/oauth-flow.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/oauth-flows.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/openapi.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/operation.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/parameter.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/path_item.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/paths.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/ref.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/request_body.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/response.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/responses.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/schema.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/security-requirement.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/security-scheme.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/server.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/server_variable.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/single_or_array.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/tag.go create mode 100644 
vendor/github.com/sv-tools/openapi/spec/type_formats.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/types.go create mode 100644 vendor/github.com/sv-tools/openapi/spec/xml.go create mode 100644 vendor/github.com/swaggo/swag/v2/.gitignore create mode 100644 vendor/github.com/swaggo/swag/v2/.goreleaser.yml create mode 100644 vendor/github.com/swaggo/swag/v2/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/swaggo/swag/v2/CONTRIBUTING.md create mode 100644 vendor/github.com/swaggo/swag/v2/Dockerfile create mode 100644 vendor/github.com/swaggo/swag/v2/Makefile create mode 100644 vendor/github.com/swaggo/swag/v2/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/swaggo/swag/v2/README.md create mode 100644 vendor/github.com/swaggo/swag/v2/README_pt.md create mode 100644 vendor/github.com/swaggo/swag/v2/README_zh-CN.md create mode 100644 vendor/github.com/swaggo/swag/v2/const.go create mode 100644 vendor/github.com/swaggo/swag/v2/doc.go create mode 100644 vendor/github.com/swaggo/swag/v2/enums.go create mode 100644 vendor/github.com/swaggo/swag/v2/extensions.go create mode 100644 vendor/github.com/swaggo/swag/v2/field_parser.go create mode 100644 vendor/github.com/swaggo/swag/v2/field_parserv3.go create mode 100644 vendor/github.com/swaggo/swag/v2/formatter.go create mode 100644 vendor/github.com/swaggo/swag/v2/generics.go create mode 100644 vendor/github.com/swaggo/swag/v2/genericsv3.go create mode 100644 vendor/github.com/swaggo/swag/v2/golist.go create mode 100644 vendor/github.com/swaggo/swag/v2/license create mode 100644 vendor/github.com/swaggo/swag/v2/operation.go create mode 100644 vendor/github.com/swaggo/swag/v2/operationv3.go create mode 100644 vendor/github.com/swaggo/swag/v2/package.go create mode 100644 vendor/github.com/swaggo/swag/v2/packages.go create mode 100644 vendor/github.com/swaggo/swag/v2/parser.go create mode 100644 vendor/github.com/swaggo/swag/v2/parserv3.go create mode 100644 
vendor/github.com/swaggo/swag/v2/schema.go create mode 100644 vendor/github.com/swaggo/swag/v2/schemav3.go create mode 100644 vendor/github.com/swaggo/swag/v2/spec.go create mode 100644 vendor/github.com/swaggo/swag/v2/swagger.go create mode 100644 vendor/github.com/swaggo/swag/v2/types.go create mode 100644 vendor/github.com/swaggo/swag/v2/typesv3.go create mode 100644 vendor/github.com/swaggo/swag/v2/utils.go create mode 100644 vendor/github.com/swaggo/swag/v2/version.go create mode 100644 vendor/go.uber.org/atomic/.codecov.yml create mode 100644 vendor/go.uber.org/atomic/.gitignore create mode 100644 vendor/go.uber.org/atomic/CHANGELOG.md create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt create mode 100644 vendor/go.uber.org/atomic/Makefile create mode 100644 vendor/go.uber.org/atomic/README.md create mode 100644 vendor/go.uber.org/atomic/bool.go create mode 100644 vendor/go.uber.org/atomic/bool_ext.go create mode 100644 vendor/go.uber.org/atomic/doc.go create mode 100644 vendor/go.uber.org/atomic/duration.go create mode 100644 vendor/go.uber.org/atomic/duration_ext.go create mode 100644 vendor/go.uber.org/atomic/error.go create mode 100644 vendor/go.uber.org/atomic/error_ext.go create mode 100644 vendor/go.uber.org/atomic/float32.go create mode 100644 vendor/go.uber.org/atomic/float32_ext.go create mode 100644 vendor/go.uber.org/atomic/float64.go create mode 100644 vendor/go.uber.org/atomic/float64_ext.go create mode 100644 vendor/go.uber.org/atomic/gen.go create mode 100644 vendor/go.uber.org/atomic/int32.go create mode 100644 vendor/go.uber.org/atomic/int64.go create mode 100644 vendor/go.uber.org/atomic/nocmp.go create mode 100644 vendor/go.uber.org/atomic/pointer_go118.go create mode 100644 vendor/go.uber.org/atomic/pointer_go118_pre119.go create mode 100644 vendor/go.uber.org/atomic/pointer_go119.go create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/atomic/string_ext.go create mode 100644 
vendor/go.uber.org/atomic/time.go create mode 100644 vendor/go.uber.org/atomic/time_ext.go create mode 100644 vendor/go.uber.org/atomic/uint32.go create mode 100644 vendor/go.uber.org/atomic/uint64.go create mode 100644 vendor/go.uber.org/atomic/uintptr.go create mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go create mode 100644 vendor/go.uber.org/atomic/value.go delete mode 100644 vendor/golang.org/x/crypto/acme/version_go112.go create mode 100644 vendor/golang.org/x/mod/LICENSE create mode 100644 vendor/golang.org/x/mod/PATENTS create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go create mode 100644 vendor/golang.org/x/mod/module/module.go create mode 100644 vendor/golang.org/x/mod/module/pseudo.go create mode 100644 vendor/golang.org/x/mod/semver/semver.go create mode 100644 vendor/golang.org/x/net/http2/config.go create mode 100644 vendor/golang.org/x/net/http2/config_go125.go create mode 100644 vendor/golang.org/x/net/http2/config_go126.go delete mode 100644 vendor/golang.org/x/net/http2/testsync.go create mode 100644 vendor/golang.org/x/net/http2/unencrypted.go rename vendor/golang.org/x/net/http2/{writesched_priority.go => writesched_priority_rfc7540.go} (76%) create mode 100644 vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go create mode 100644 vendor/golang.org/x/net/internal/httpcommon/ascii.go rename vendor/golang.org/x/net/{http2 => internal/httpcommon}/headermap.go (74%) create mode 100644 vendor/golang.org/x/net/internal/httpcommon/request.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/sys/unix/auxv.go create mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go create mode 100644 vendor/golang.org/x/sys/unix/vgetrandom_linux.go rename vendor/golang.org/x/{tools/internal/versions/toolchain_go119.go => sys/unix/vgetrandom_unsupported.go} (56%) 
create mode 100644 vendor/golang.org/x/tools/imports/forward.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 vendor/golang.org/x/tools/internal/event/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go create mode 100644 vendor/golang.org/x/tools/internal/imports/fix.go create mode 100644 vendor/golang.org/x/tools/internal/imports/imports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod_cache.go create mode 100644 vendor/golang.org/x/tools/internal/imports/sortimports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/source.go create mode 100644 vendor/golang.org/x/tools/internal/imports/source_env.go create mode 100644 vendor/golang.org/x/tools/internal/imports/source_modindex.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/directories.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/index.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/lookup.go create 
mode 100644 vendor/golang.org/x/tools/internal/modindex/modindex.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/symbols.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/deps.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/import.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/manifest.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/stdlib.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/features.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/versions.go rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/.gitignore (88%) create mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml create mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/LICENSE (100%) create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/README.md (86%) create mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md create mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/fields.go (99%) rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/yaml.go (66%) create mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/go.mod b/go.mod index 1fa1a6bc..42258236 100644 --- a/go.mod +++ b/go.mod @@ -1,24 +1,24 @@ module 
git.gocasts.ir/ebhomengo/niki -go 1.23 +go 1.25.4 require ( - github.com/brianvoe/gofakeit/v7 v7.1.2 + github.com/brianvoe/gofakeit/v7 v7.14.1 github.com/go-ozzo/ozzo-validation v3.6.0+incompatible github.com/go-ozzo/ozzo-validation/v4 v4.3.0 - github.com/go-sql-driver/mysql v1.8.1 - github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/kavenegar/kavenegar-go v0.0.0-20221124112814-40341057b5ca + github.com/go-sql-driver/mysql v1.9.3 + github.com/golang-jwt/jwt/v4 v4.5.2 + github.com/kavenegar/kavenegar-go v0.0.0-20240205151018-77039f51467d github.com/knadh/koanf v1.5.0 - github.com/labstack/echo-jwt/v4 v4.2.0 - github.com/labstack/echo/v4 v4.12.0 - github.com/ory/dockertest/v3 v3.11.0 - github.com/redis/go-redis/v9 v9.4.0 - github.com/rubenv/sql-migrate v1.6.0 - github.com/stretchr/testify v1.9.0 - github.com/swaggo/echo-swagger v1.4.1 - github.com/swaggo/swag v1.16.3 - golang.org/x/crypto v0.23.0 + github.com/labstack/echo-jwt/v4 v4.4.0 + github.com/labstack/echo/v4 v4.15.1 + github.com/ory/dockertest/v3 v3.12.0 + github.com/redis/go-redis/v9 v9.18.0 + github.com/rubenv/sql-migrate v1.8.1 + github.com/stretchr/testify v1.11.1 + github.com/swaggo/echo-swagger v1.5.2 + github.com/swaggo/swag v1.16.6 + golang.org/x/crypto v0.46.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -31,54 +31,60 @@ require ( github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/continuity v0.4.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/continuity v0.4.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/docker/cli v26.1.4+incompatible // indirect + github.com/docker/cli v27.4.1+incompatible // indirect github.com/docker/docker 
v27.1.1+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/fatih/structs v1.1.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/spec v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt v3.2.2+incompatible // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/labstack/gommon v0.4.2 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/user v0.3.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runc v1.1.13 // indirect + github.com/opencontainers/runc v1.2.3 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib 
v1.0.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/sv-tools/openapi v0.2.1 // indirect github.com/swaggo/files/v2 v2.0.0 // indirect + github.com/swaggo/swag/v2 v2.0.0-rc4 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.21.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.39.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 91d3d30c..940950dd 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/brianvoe/gofakeit/v7 v7.1.2 h1:vSKaVScNhWVpf1rlyEKSvO8zKZfuDtGqoIHT//iNNb8= -github.com/brianvoe/gofakeit/v7 v7.1.2/go.mod h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA= +github.com/brianvoe/gofakeit/v7 v7.14.1 h1:a7fe3fonbj0cW3wgl5VwIKfZtiH9C3cLnwcIXWT7sow= +github.com/brianvoe/gofakeit/v7 v7.14.1/go.mod 
h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -49,15 +49,16 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= -github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -65,8 +66,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwenxRM7/rLu8= -github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= +github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -83,10 +84,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 
h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= @@ -97,32 +96,37 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference 
v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -206,10 +210,12 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kavenegar/kavenegar-go v0.0.0-20221124112814-40341057b5ca 
h1:aEyiDaExheG7fNpEYcILCVGgM7jlLzTVgxEQAGaepeM= -github.com/kavenegar/kavenegar-go v0.0.0-20221124112814-40341057b5ca/go.mod h1:CRhvvr4KNAyrg+ewrutOf+/QoHs7lztSoLjp+GqhYlA= +github.com/kavenegar/kavenegar-go v0.0.0-20240205151018-77039f51467d h1:5yPyBSS28Nojbr7pAkiXADGj6VpTXx73o6SsprKbSoo= +github.com/kavenegar/kavenegar-go v0.0.0-20240205151018-77039f51467d/go.mod h1:CRhvvr4KNAyrg+ewrutOf+/QoHs7lztSoLjp+GqhYlA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -217,37 +223,40 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
-github.com/labstack/echo-jwt/v4 v4.2.0 h1:odSISV9JgcSCuhgQSV/6Io3i7nUmfM/QkBeR5GVJj5c= -github.com/labstack/echo-jwt/v4 v4.2.0/go.mod h1:MA2RqdXdEn4/uEglx0HcUOgQSyBaTh5JcaHIan3biwU= -github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+kI0= -github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM= +github.com/labstack/echo-jwt/v4 v4.4.0 h1:nrXaEnJupfc2R4XChcLRDyghhMZup77F8nIzHnBK19U= +github.com/labstack/echo-jwt/v4 v4.4.0/go.mod h1:kYXWgWms9iFqI3ldR+HAEj/Zfg5rZtR7ePOgktG4Hjg= +github.com/labstack/echo/v4 v4.15.1 h1:S9keusg26gZpjMmPqB5hOEvNKnmd1lNmcHrbbH2lnFs= +github.com/labstack/echo/v4 v4.15.1/go.mod h1:xmw1clThob0BSVRX1CRQkGQ/vjwcpOMjQZSZa9fKA/c= github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -269,6 +278,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= 
+github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -277,16 +288,17 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs= -github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= -github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA= -github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI= +github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80= +github.com/opencontainers/runc v1.2.3/go.mod 
h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= +github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= +github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= @@ -317,14 +329,14 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk= -github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= +github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rubenv/sql-migrate v1.6.0 h1:IZpcTlAx/VKXphWEpwWJ7BaMq05tYtE80zYz+8a5Il8= -github.com/rubenv/sql-migrate v1.6.0/go.mod h1:m3ilnKP7sNb4eYkLsp6cGdPOl4OBcXM6rcbzU+Oqc5k= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 
+github.com/rubenv/sql-migrate v1.8.1 h1:EPNwCvjAowHI3TnZ+4fQu3a915OpnQoPAjTXCGOy2U0= +github.com/rubenv/sql-migrate v1.8.1/go.mod h1:BTIKBORjzyxZDS6dzoiw6eAFYJ1iNlGAtjn4LGeVjS8= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= @@ -338,21 +350,31 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk= 
-github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/sv-tools/openapi v0.2.1 h1:ES1tMQMJFGibWndMagvdoo34T1Vllxr1Nlm5wz6b1aA= +github.com/sv-tools/openapi v0.2.1/go.mod h1:k5VuZamTw1HuiS9p2Wl5YIDWzYnHG6/FgPOSFXLAhGg= +github.com/swaggo/echo-swagger v1.5.2 h1:KUM4QuEO1r/maky6Ybb9wS5MFEkJUpXwPbK4wwBe5Uk= +github.com/swaggo/echo-swagger v1.5.2/go.mod h1:nt3Z+SlyzXNIQ4odFNlPzRdcNOFvkPJHf+t4sMLhNu4= github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw= github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM= -github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= -github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= +github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI= +github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg= +github.com/swaggo/swag/v2 v2.0.0-rc4 h1:SZ8cK68gcV6cslwrJMIOqPkJELRwq4gmjvk77MrvHvY= +github.com/swaggo/swag/v2 v2.0.0-rc4/go.mod h1:Ow7Y8gF16BTCDn8YxZbyKn8FkMLRUHekv1kROJZpbvE= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= @@ -367,10 +389,14 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -378,8 +404,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -389,8 +415,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -408,8 +434,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -422,8 +448,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -456,10 +482,9 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -467,11 +492,11 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ 
-483,8 +508,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -519,7 +544,9 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -535,6 +562,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -543,3 +571,5 @@ gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/github.com/brianvoe/gofakeit/v7/BENCHMARKS.md b/vendor/github.com/brianvoe/gofakeit/v7/BENCHMARKS.md index 0f6776c5..35e5c815 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/BENCHMARKS.md +++ b/vendor/github.com/brianvoe/gofakeit/v7/BENCHMARKS.md @@ -7,317 +7,356 @@ Table generated with tablesgenerator.com/markdown_tables File->Paste table data | Benchmark | Ops | CPU | MEM | MEM alloc | |---------------------------------------|----------|----------------|--------------|------------------| -| BenchmarkAddress-10 | 1369538 | 874.7 ns/op | 195 B/op | 5 allocs/op | -| BenchmarkStreet-10 | 3438403 | 347.9 ns/op | 
25 B/op | 2 allocs/op | -| BenchmarkStreetNumber-10 | 8601847 | 138.2 ns/op | 4 B/op | 1 allocs/op | -| BenchmarkStreetPrefix-10 | 19814623 | 60.26 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkStreetName-10 | 19633909 | 60.78 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkStreetSuffix-10 | 19717612 | 60.19 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCity-10 | 20219280 | 58.88 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkState-10 | 19526760 | 60.85 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkStateAbr-10 | 19634631 | 60.79 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkZip-10 | 7521580 | 157.7 ns/op | 5 B/op | 1 allocs/op | -| BenchmarkCountry-10 | 19451166 | 61.29 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCountryAbr-10 | 19585867 | 60.82 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLatitude-10 | 72309668 | 16.22 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLongitude-10 | 72334910 | 16.23 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLatitudeInRange-10 | 65830375 | 17.77 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLongitudeInRange-10 | 66400602 | 17.77 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPetName-10 | 30391768 | 39.19 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAnimal-10 | 28761544 | 41.22 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAnimalType-10 | 26955640 | 44.13 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFarmAnimal-10 | 22307872 | 53.39 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCat-10 | 24226416 | 49.13 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDog-10 | 19702195 | 60.53 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBird-10 | 17095884 | 70.22 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAppName-10 | 3805696 | 314.4 ns/op | 25 B/op | 1 allocs/op | -| BenchmarkAppVersion-10 | 10250247 | 116.4 ns/op | 7 B/op | 1 allocs/op | -| BenchmarkAppAuthor-10 | 11592895 | 101.2 ns/op | 8 B/op | 0 allocs/op | -| BenchmarkUsername-10 | 8975020 | 132.9 ns/op | 16 B/op | 2 allocs/op | -| BenchmarkPassword-10 | 322147 | 3647 ns/op | 1656 B/op | 60 allocs/op | -| BenchmarkBeerName-10 | 27986706 | 42.27 
ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerStyle-10 | 19460616 | 60.99 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerHop-10 | 26915132 | 44.35 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerYeast-10 | 24840991 | 47.98 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerMalt-10 | 20806075 | 57.18 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBeerIbu-10 | 41349307 | 28.99 ns/op | 8 B/op | 1 allocs/op | -| BenchmarkBeerAlcohol-10 | 6054163 | 197.8 ns/op | 28 B/op | 2 allocs/op | -| BenchmarkBeerBlg-10 | 5825622 | 205.6 ns/op | 37 B/op | 2 allocs/op | -| BenchmarkBook-10 | 6927696 | 171.9 ns/op | 48 B/op | 1 allocs/op | -| BenchmarkBookTitle-10 | 31594431 | 37.36 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBookAuthor-10 | 29969000 | 39.91 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBookGenre-10 | 24269676 | 48.77 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCar-10 | 3795943 | 316.3 ns/op | 96 B/op | 1 allocs/op | -| BenchmarkCarType-10 | 26309082 | 43.81 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarFuelType-10 | 26414821 | 45.18 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarTransmissionType-10 | 24309171 | 48.83 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarMaker-10 | 23505099 | 51.01 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCarModel-10 | 19055469 | 62.82 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCelebrityActor-10 | 19915483 | 57.84 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCelebrityBusiness-10 | 20186090 | 67.55 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCelebritySport-10 | 14223360 | 84.47 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkColor-10 | 21535978 | 54.16 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNiceColors-10 | 71414755 | 16.16 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSafeColor-10 | 24683570 | 46.53 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHexColor-10 | 4815675 | 250.3 ns/op | 24 B/op | 3 allocs/op | -| BenchmarkRGBColor-10 | 19453399 | 61.67 ns/op | 24 B/op | 1 allocs/op | -| BenchmarkCompany-10 | 25604892 | 46.66 ns/op | 0 B/op | 0 allocs/op | -| 
BenchmarkCompanySuffix-10 | 24647574 | 48.83 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBlurb-10 | 20634126 | 58.88 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBuzzWord-10 | 23034157 | 51.84 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBS-10 | 21803314 | 55.08 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkJob-10 | 4121804 | 292.0 ns/op | 64 B/op | 1 allocs/op | -| BenchmarkJobTitle-10 | 24344308 | 47.51 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkJobDescriptor-10 | 24049240 | 50.12 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkJobLevel-10 | 19349389 | 62.45 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSlogan-10 | 4499653 | 263.1 ns/op | 41 B/op | 1 allocs/op | -| BenchmarkCSVLookup100-10 | 1184 | 1014597 ns/op | 713620 B/op | 9923 allocs/op | -| BenchmarkEmoji-10 | 24200866 | 49.72 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiDescription-10 | 22978600 | 52.18 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiCategory-10 | 21228057 | 56.57 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiAlias-10 | 17616240 | 68.45 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkEmojiTag-10 | 19253190 | 62.21 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkError-10 | 1637725 | 736.5 ns/op | 288 B/op | 8 allocs/op | -| BenchmarkErrorObject-10 | 6755540 | 177.7 ns/op | 32 B/op | 3 allocs/op | -| BenchmarkErrorDatabase-10 | 5794706 | 200.2 ns/op | 63 B/op | 3 allocs/op | -| BenchmarkErrorGRPC-10 | 6063802 | 196.8 ns/op | 64 B/op | 3 allocs/op | -| BenchmarkErrorHTTP-10 | 3956130 | 302.2 ns/op | 158 B/op | 4 allocs/op | -| BenchmarkErrorHTTPClient-10 | 6025258 | 196.4 ns/op | 52 B/op | 3 allocs/op | -| BenchmarkErrorHTTPServer-10 | 5969395 | 202.1 ns/op | 59 B/op | 3 allocs/op | -| BenchmarkErrorRuntime-10 | 4786108 | 248.3 ns/op | 150 B/op | 3 allocs/op | -| BenchmarkErrorValidation-10 | 1811821 | 667.8 ns/op | 277 B/op | 7 allocs/op | -| BenchmarkFileMimeType-10 | 26273320 | 45.47 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFileExtension-10 | 22216770 | 53.94 ns/op | 0 B/op | 0 allocs/op | -| 
BenchmarkCusip-10 | 6778542 | 176.4 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkIsin-10 | 1844566 | 652.1 ns/op | 525 B/op | 7 allocs/op | -| BenchmarkFruit-10 | 20381516 | 58.81 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVegetable-10 | 19638996 | 61.11 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBreakfast-10 | 9425649 | 127.2 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkLunch-10 | 8996594 | 133.6 ns/op | 34 B/op | 1 allocs/op | -| BenchmarkDinner-10 | 9427389 | 126.6 ns/op | 36 B/op | 1 allocs/op | -| BenchmarkDrink-10 | 8552294 | 140.4 ns/op | 7 B/op | 2 allocs/op | -| BenchmarkSnack-10 | 7678719 | 156.7 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkDessert-10 | 8907098 | 134.0 ns/op | 31 B/op | 2 allocs/op | -| BenchmarkGamertag-10 | 2474312 | 483.9 ns/op | 83 B/op | 5 allocs/op | -| BenchmarkDice-10 | 47727080 | 25.22 ns/op | 8 B/op | 1 allocs/op | -| BenchmarkGenerate/package-10 | 423741 | 2822 ns/op | 1187 B/op | 29 allocs/op | -| BenchmarkGenerate/Complex-10 | 138112 | 8653 ns/op | 4553 B/op | 80 allocs/op | -| BenchmarkFixedWidthLookup100-10 | 2072 | 583512 ns/op | 489975 B/op | 8701 allocs/op | -| BenchmarkRegex-10 | 633699 | 1914 ns/op | 1632 B/op | 27 allocs/op | -| BenchmarkRegexEmail-10 | 205447 | 5893 ns/op | 4084 B/op | 90 allocs/op | -| BenchmarkMap-10 | 337576 | 3596 ns/op | 1111 B/op | 16 allocs/op | -| BenchmarkHackerPhrase-10 | 166683 | 7209 ns/op | 3107 B/op | 50 allocs/op | -| BenchmarkHackerAbbreviation-10 | 25295019 | 47.33 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerAdjective-10 | 24022460 | 49.76 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerNoun-10 | 22496308 | 53.31 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackerVerb-10 | 18546052 | 64.53 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHackeringVerb-10 | 20298242 | 59.05 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkReplaceWithNumbers-10 | 1402717 | 852.8 ns/op | 296 B/op | 10 allocs/op | -| BenchmarkHipsterWord-10 | 25151432 | 47.83 ns/op | 0 B/op | 0 allocs/op | -| 
BenchmarkHipsterSentence-10 | 1314279 | 907.8 ns/op | 288 B/op | 3 allocs/op | -| BenchmarkHipsterParagraph-10 | 67437 | 17682 ns/op | 10521 B/op | 48 allocs/op | -| BenchmarkInputName-10 | 20759898 | 57.98 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSvg-10 | 225738 | 5181 ns/op | 8876 B/op | 52 allocs/op | -| BenchmarkImageURL-10 | 15524359 | 77.15 ns/op | 38 B/op | 3 allocs/op | -| BenchmarkImage-10 | 63 | 18773091 ns/op | 2457691 B/op | 307202 allocs/op | -| BenchmarkImageJpeg-10 | 39 | 29498291 ns/op | 2982478 B/op | 307217 allocs/op | -| BenchmarkImagePng-10 | 16 | 68552771 ns/op | 5899010 B/op | 307270 allocs/op | -| BenchmarkDomainName-10 | 3001479 | 402.2 ns/op | 95 B/op | 5 allocs/op | -| BenchmarkDomainSuffix-10 | 21476332 | 56.03 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkURL-10 | 1289262 | 934.6 ns/op | 277 B/op | 10 allocs/op | -| BenchmarkHTTPMethod-10 | 19895946 | 60.56 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkIPv4Address-10 | 6088518 | 196.5 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkIPv6Address-10 | 2580320 | 462.0 ns/op | 111 B/op | 8 allocs/op | -| BenchmarkMacAddress-10 | 3281300 | 364.7 ns/op | 24 B/op | 1 allocs/op | -| BenchmarkHTTPStatusCode-10 | 16597051 | 72.18 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHTTPStatusCodeSimple-10 | 17250238 | 69.52 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLogLevel-10 | 20608036 | 58.20 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUserAgent-10 | 1946059 | 615.5 ns/op | 298 B/op | 5 allocs/op | -| BenchmarkChromeUserAgent-10 | 2619324 | 458.2 ns/op | 184 B/op | 5 allocs/op | -| BenchmarkFirefoxUserAgent-10 | 1601706 | 753.8 ns/op | 362 B/op | 6 allocs/op | -| BenchmarkSafariUserAgent-10 | 1569805 | 764.4 ns/op | 551 B/op | 7 allocs/op | -| BenchmarkOperaUserAgent-10 | 2378972 | 504.7 ns/op | 212 B/op | 5 allocs/op | -| BenchmarkJSONLookup100-10 | 928 | 1276230 ns/op | 813725 B/op | 12134 allocs/op | -| BenchmarkLanguage-10 | 23873984 | 50.34 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLanguageAbbreviation-10 | 
25025524 | 47.93 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLanguageBCP-10 | 21895112 | 54.74 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkProgrammingLanguage-10 | 21169636 | 56.70 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLoremIpsumWord-10 | 23980356 | 49.92 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLoremIpsumSentence-10 | 1344384 | 894.8 ns/op | 219 B/op | 2 allocs/op | -| BenchmarkLoremIpsumParagraph-10 | 66643 | 17916 ns/op | 8483 B/op | 40 allocs/op | -| BenchmarkMinecraftOre-10 | 15077451 | 79.85 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftWood-10 | 14422303 | 83.44 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftArmorTier-10 | 15262417 | 78.70 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftArmorPart-10 | 15340200 | 78.11 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftWeapon-10 | 15107792 | 79.78 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftTool-10 | 14428170 | 83.15 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftDye-10 | 14657188 | 81.95 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftFood-10 | 14860236 | 81.01 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftAnimal-10 | 15281302 | 78.35 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftVillagerJob-10 | 14586627 | 82.14 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftVillagerStation-10 | 14678592 | 81.82 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftVillagerLevel-10 | 14314164 | 83.76 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftMobPassive-10 | 15132750 | 79.32 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftMobNeutral-10 | 13802880 | 87.23 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftMobHostile-10 | 13141233 | 91.06 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftMobBoss-10 | 15245322 | 78.79 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftBiome-10 | 14943789 | 79.86 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinecraftWeather-10 | 12681386 | 94.55 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkBool-10 | 73596490 | 16.18 ns/op | 0 
B/op | 0 allocs/op | -| BenchmarkUUID-10 | 4128735 | 288.7 ns/op | 48 B/op | 1 allocs/op | -| BenchmarkShuffleAnySlice-10 | 3149857 | 380.0 ns/op | 24 B/op | 1 allocs/op | -| BenchmarkFlipACoin-10 | 74457853 | 16.17 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMovie-10 | 9234234 | 129.3 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkMovieName-10 | 25314163 | 47.82 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMovieGenre-10 | 24570799 | 48.81 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNumber-10 | 74087221 | 16.21 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint8-10 | 73790145 | 16.35 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint16-10 | 74334474 | 16.27 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint32-10 | 71804154 | 16.72 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUint64-10 | 71385904 | 16.64 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkUintRange-10 | 73982353 | 16.13 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt8-10 | 73927286 | 16.14 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt16-10 | 74022668 | 16.19 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt32-10 | 72009002 | 16.64 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkInt64-10 | 72375081 | 16.59 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkIntRange-10 | 74396306 | 16.20 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat32-10 | 73950822 | 16.20 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat32Range-10 | 73622833 | 16.18 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat64-10 | 73076970 | 16.31 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFloat64Range-10 | 73385329 | 16.33 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkShuffleInts-10 | 9151563 | 131.8 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkRandomInt-10 | 72188592 | 16.63 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkRandomUint-10 | 72293332 | 16.64 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHexUint-10 | 14888452 | 80.93 ns/op | 16 B/op | 2 allocs/op | -| BenchmarkCurrency-10 | 14366668 | 83.15 ns/op | 32 B/op | 1 allocs/op | -| BenchmarkCurrencyShort-10 | 24445954 | 48.68 ns/op | 0 
B/op | 0 allocs/op | -| BenchmarkCurrencyLong-10 | 23560556 | 50.65 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPrice-10 | 73693664 | 16.33 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCreditCard-10 | 1000000 | 1153 ns/op | 264 B/op | 15 allocs/op | -| BenchmarkCreditCardType-10 | 32410167 | 36.93 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkCreditCardNumber-10 | 1511084 | 799.1 ns/op | 183 B/op | 10 allocs/op | -| BenchmarkCreditCardExp-10 | 11014600 | 108.5 ns/op | 5 B/op | 1 allocs/op | -| BenchmarkCreditCardCvv-10 | 20325733 | 59.31 ns/op | 3 B/op | 1 allocs/op | -| BenchmarkAchRouting-10 | 7338657 | 164.0 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkAchAccount-10 | 5646235 | 212.0 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkBitcoinAddress-10 | 517399 | 2306 ns/op | 715 B/op | 37 allocs/op | -| BenchmarkBitcoinPrivateKey-10 | 1276884 | 943.2 ns/op | 184 B/op | 5 allocs/op | -| BenchmarkName-10 | 7771977 | 152.6 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkFirstName-10 | 23523357 | 50.98 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMiddleName-10 | 17589612 | 68.17 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLastName-10 | 20825980 | 57.63 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNamePrefix-10 | 25542308 | 46.65 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNameSuffix-10 | 21972974 | 54.56 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSSN-10 | 31829850 | 37.71 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkGender-10 | 73621140 | 16.25 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkHobby-10 | 17347129 | 69.06 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPerson-10 | 317911 | 3693 ns/op | 837 B/op | 33 allocs/op | -| BenchmarkContact-10 | 1843221 | 650.8 ns/op | 136 B/op | 6 allocs/op | -| BenchmarkPhone-10 | 6786794 | 176.2 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkPhoneFormatted-10 | 4674930 | 256.2 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkEmail-10 | 2794358 | 431.1 ns/op | 88 B/op | 4 allocs/op | -| BenchmarkTeams-10 | 1576238 | 763.8 ns/op | 672 B/op | 10 allocs/op | -| 
BenchmarkProduct-10 | 206918 | 5813 ns/op | 1069 B/op | 31 allocs/op | -| BenchmarkProductName-10 | 2313364 | 517.4 ns/op | 103 B/op | 5 allocs/op | -| BenchmarkProductDescription-10 | 348346 | 3434 ns/op | 549 B/op | 8 allocs/op | -| BenchmarkProductCategory-10 | 25139860 | 47.73 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkProductFeature-10 | 21264922 | 56.46 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkProductMaterial-10 | 18142828 | 66.24 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkProductUPC-10 | 1399148 | 859.1 ns/op | 96 B/op | 11 allocs/op | -| BenchmarkSchool-10 | 4161710 | 287.6 ns/op | 34 B/op | 1 allocs/op | -| BenchmarkLetter-10 | 73457020 | 16.29 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkLetterN-10 | 5060271 | 238.8 ns/op | 64 B/op | 2 allocs/op | -| BenchmarkVowel-10 | 58685206 | 20.87 ns/op | 4 B/op | 1 allocs/op | -| BenchmarkDigit-10 | 73944177 | 16.20 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDigitN-10 | 5051070 | 236.6 ns/op | 64 B/op | 2 allocs/op | -| BenchmarkNumerify-10 | 6794545 | 176.4 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkLexify-10 | 11113212 | 108.3 ns/op | 8 B/op | 1 allocs/op | -| BenchmarkShuffleStrings-10 | 9924429 | 121.0 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkRandomString-10 | 73420688 | 16.34 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTemplate-10 | 2488 | 477349 ns/op | 280877 B/op | 4611 allocs/op | -| BenchmarkDate-10 | 10292476 | 116.2 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPastDate-10 | 18285830 | 65.48 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkFutureDate-10 | 18399240 | 65.13 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDateRange-10 | 8406979 | 142.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMonth-10 | 74105902 | 16.26 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMonthString-10 | 73647870 | 16.26 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkWeekDay-10 | 73990911 | 16.24 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkDay-10 | 73435291 | 16.21 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkYear-10 | 73950066 | 16.21 ns/op | 0 
B/op | 0 allocs/op | -| BenchmarkHour-10 | 74219916 | 16.21 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkMinute-10 | 74349634 | 16.21 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSecond-10 | 73787313 | 16.29 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNanoSecond-10 | 74299380 | 16.15 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZone-10 | 19105237 | 62.83 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneFull-10 | 16170054 | 74.27 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneAbv-10 | 20725029 | 58.23 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneOffset-10 | 14597666 | 81.13 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkTimeZoneRegion-10 | 15733551 | 76.25 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkWeighted-10 | 28507484 | 40.42 ns/op | 16 B/op | 1 allocs/op | -| BenchmarkAdjective-10 | 6726474 | 178.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectiveDescriptive-10 | 16486224 | 73.39 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectiveQuantitative-10 | 15290762 | 78.51 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectiveProper-10 | 16535046 | 72.42 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectiveDemonstrative-10 | 16448917 | 73.41 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectivePossessive-10 | 15715839 | 73.22 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectiveInterrogative-10 | 15543478 | 77.43 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdjectiveIndefinite-10 | 16306894 | 73.50 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverb-10 | 7139924 | 168.7 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverbManner-10 | 17139112 | 70.37 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverbDegree-10 | 16213138 | 73.70 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverbPlace-10 | 17268267 | 69.67 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverbTimeDefinite-10 | 16273309 | 73.70 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverbTimeIndefinite-10 | 15822297 | 74.26 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkAdverbFrequencyDefinite-10 | 16344181 | 73.30 ns/op | 0 B/op | 0 
allocs/op | -| BenchmarkAdverbFrequencyIndefinite-10 | 16118569 | 74.27 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkComment-10 | 1000000 | 1146 ns/op | 258 B/op | 6 allocs/op | -| BenchmarkConnective-10 | 7132710 | 168.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkConnectiveTime-10 | 15339457 | 78.08 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkConnectiveComparative-10 | 16188842 | 74.04 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkConnectiveComplaint-10 | 14127903 | 85.00 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkConnectiveListing-10 | 16073437 | 74.65 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkConnectiveCasual-10 | 13771904 | 87.06 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkConnectiveExamplify-10 | 15763296 | 76.03 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkWord-10 | 8047610 | 148.5 ns/op | 3 B/op | 0 allocs/op | -| BenchmarkSentenceSimple-10 | 682924 | 1707 ns/op | 590 B/op | 11 allocs/op | -| BenchmarkInterjection-10 | 16295702 | 73.50 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNoun-10 | 6711976 | 179.3 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounCommon-10 | 17117466 | 69.83 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounConcrete-10 | 17144979 | 69.81 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounAbstract-10 | 16839790 | 71.16 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounCollectivePeople-10 | 16360652 | 73.24 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounCollectiveAnimal-10 | 16453287 | 72.79 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounCollectiveThing-10 | 16397232 | 72.97 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounCountable-10 | 17171895 | 69.78 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounUncountable-10 | 17193412 | 69.75 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkNounProper-10 | 10644372 | 112.0 ns/op | 7 B/op | 0 allocs/op | -| BenchmarkNounDeterminer-10 | 17003730 | 70.44 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPhrase-10 | 23481584 | 51.12 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPhraseNoun-10 | 2961691 | 405.1 ns/op | 104 B/op | 2 
allocs/op | -| BenchmarkPhraseVerb-10 | 1422132 | 845.1 ns/op | 232 B/op | 6 allocs/op | -| BenchmarkPhraseAdverb-10 | 7617193 | 153.3 ns/op | 9 B/op | 0 allocs/op | -| BenchmarkPhrasePreposition-10 | 2336155 | 513.3 ns/op | 123 B/op | 3 allocs/op | -| BenchmarkPreposition-10 | 9244665 | 129.9 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPrepositionSimple-10 | 16397623 | 73.11 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPrepositionDouble-10 | 16107751 | 74.19 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPrepositionCompound-10 | 16364900 | 73.10 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronoun-10 | 6436707 | 186.4 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounPersonal-10 | 16997427 | 70.53 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounObject-10 | 15303380 | 78.27 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounPossessive-10 | 15323908 | 78.10 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounReflective-10 | 15258552 | 78.45 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounIndefinite-10 | 16053780 | 74.69 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounDemonstrative-10 | 16476726 | 72.73 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounInterrogative-10 | 15526576 | 77.15 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkPronounRelative-10 | 14159284 | 84.64 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkSentence-10 | 721934 | 1642 ns/op | 219 B/op | 3 allocs/op | -| BenchmarkParagraph-10 | 39356 | 30481 ns/op | 6687 B/op | 53 allocs/op | -| BenchmarkQuestion-10 | 1757269 | 683.1 ns/op | 243 B/op | 3 allocs/op | -| BenchmarkQuote-10 | 1522988 | 787.2 ns/op | 261 B/op | 3 allocs/op | -| BenchmarkVerb-10 | 8924802 | 127.6 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVerbAction-10 | 17150564 | 69.83 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVerbTransitive-10 | 17328488 | 69.21 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVerbIntransitive-10 | 16427985 | 72.98 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkVerbLinking-10 | 17754280 | 67.52 ns/op | 0 B/op | 0 allocs/op | -| 
BenchmarkVerbHelping-10 | 17118238 | 70.31 ns/op | 0 B/op | 0 allocs/op | -| BenchmarkXMLLookup100-10 | 937 | 1279022 ns/op | 862536 B/op | 11370 allocs/op | \ No newline at end of file +| BenchmarkAddress-10 | 1103170 | 1084 ns/op | 223 B/op | 6 allocs/op | +| BenchmarkStreet-10 | 2678632 | 426.1 ns/op | 25 B/op | 2 allocs/op | +| BenchmarkStreetNumber-10 | 7752144 | 154.4 ns/op | 4 B/op | 1 allocs/op | +| BenchmarkStreetPrefix-10 | 14321246 | 84.29 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetName-10 | 14315438 | 84.03 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStreetSuffix-10 | 13827625 | 83.90 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUnit-10 | 7394977 | 162.7 ns/op | 14 B/op | 2 allocs/op | +| BenchmarkCity-10 | 14257396 | 84.31 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkState-10 | 14311333 | 83.57 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkStateAbr-10 | 14235634 | 84.14 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkZip-10 | 7180789 | 166.8 ns/op | 5 B/op | 1 allocs/op | +| BenchmarkCountry-10 | 14644250 | 82.44 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCountryAbr-10 | 14065713 | 83.73 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitude-10 | 74722898 | 16.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitude-10 | 74551914 | 16.04 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLatitudeInRange-10 | 68917987 | 17.34 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLongitudeInRange-10 | 68985835 | 17.37 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAirlineAircraftType-10 | 13969006 | 85.89 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAirlineAirplane-10 | 14245006 | 84.17 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAirlineAirport-10 | 14062609 | 84.47 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAirlineAirportIATA-10 | 14078275 | 85.46 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAirlineFlightNumber-10 | 8181994 | 147.6 ns/op | 45 B/op | 4 allocs/op | +| BenchmarkAirlineRecordLocator-10 | 8466958 | 142.1 ns/op | 23 B/op | 1 allocs/op | +| BenchmarkAirlineSeat-10 | 14182210 | 
84.84 ns/op | 19 B/op | 2 allocs/op | +| BenchmarkPetName-10 | 14235944 | 82.89 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimal-10 | 14401180 | 83.02 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAnimalType-10 | 14429384 | 82.93 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFarmAnimal-10 | 14249454 | 83.95 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCat-10 | 14469751 | 83.29 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDog-10 | 14395026 | 82.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBird-10 | 14887028 | 82.10 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAppName-10 | 3364251 | 351.7 ns/op | 25 B/op | 1 allocs/op | +| BenchmarkAppVersion-10 | 10653699 | 112.7 ns/op | 7 B/op | 1 allocs/op | +| BenchmarkAppAuthor-10 | 7753665 | 155.4 ns/op | 8 B/op | 0 allocs/op | +| BenchmarkUsername-10 | 1387273 | 910.2 ns/op | 822 B/op | 13 allocs/op | +| BenchmarkPassword-10 | 452104 | 2519 ns/op | 128 B/op | 2 allocs/op | +| BenchmarkBeerName-10 | 14425518 | 82.98 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerStyle-10 | 14450242 | 83.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerHop-10 | 14424109 | 83.10 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerYeast-10 | 14369450 | 84.90 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerMalt-10 | 14445836 | 83.21 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBeerIbu-10 | 43577666 | 27.39 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkBeerAlcohol-10 | 6503149 | 184.8 ns/op | 4 B/op | 1 allocs/op | +| BenchmarkBeerBlg-10 | 6394459 | 187.2 ns/op | 13 B/op | 1 allocs/op | +| BenchmarkBook-10 | 4396315 | 272.0 ns/op | 48 B/op | 1 allocs/op | +| BenchmarkBookTitle-10 | 14470020 | 84.03 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBookAuthor-10 | 14473611 | 83.25 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBookGenre-10 | 14471212 | 83.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCar-10 | 2628781 | 456.6 ns/op | 96 B/op | 1 allocs/op | +| BenchmarkCarType-10 | 14205280 | 84.16 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarFuelType-10 | 14108670 | 83.96 ns/op | 
0 B/op | 0 allocs/op | +| BenchmarkCarTransmissionType-10 | 13697488 | 88.10 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarMaker-10 | 14143714 | 84.80 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCarModel-10 | 14127410 | 84.76 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityActor-10 | 13975066 | 85.48 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebrityBusiness-10 | 14261449 | 84.07 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCelebritySport-10 | 14069204 | 85.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkColor-10 | 14250574 | 84.13 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNiceColors-10 | 100000000 | 11.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSafeColor-10 | 14290966 | 84.07 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHexColor-10 | 6276794 | 190.9 ns/op | 24 B/op | 3 allocs/op | +| BenchmarkRGBColor-10 | 28464741 | 42.39 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkCompany-10 | 13964258 | 86.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCompanySuffix-10 | 13848536 | 86.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBlurb-10 | 13809465 | 88.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBuzzWord-10 | 13909864 | 86.45 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBS-10 | 13955137 | 86.23 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJob-10 | 3353815 | 357.2 ns/op | 64 B/op | 1 allocs/op | +| BenchmarkJobTitle-10 | 14383609 | 83.58 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobDescriptor-10 | 14196513 | 84.28 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkJobLevel-10 | 14159757 | 84.76 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSlogan-10 | 3303717 | 340.9 ns/op | 41 B/op | 1 allocs/op | +| BenchmarkCSVLookup100-10 | 1672 | 706289 ns/op | 831725 B/op | 10251 allocs/op | +| BenchmarkDate-10 | 14483701 | 83.01 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPastDate-10 | 24398281 | 49.04 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFutureDate-10 | 24536008 | 48.87 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDateRange-10 | 9339344 | 128.1 ns/op | 0 B/op | 0 allocs/op | +| 
BenchmarkMonth-10 | 100000000 | 11.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMonthString-10 | 98138764 | 12.11 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkWeekDay-10 | 98253607 | 12.13 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDay-10 | 100000000 | 11.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkYear-10 | 100000000 | 11.44 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHour-10 | 100000000 | 11.46 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinute-10 | 100000000 | 11.48 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSecond-10 | 100000000 | 11.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNanoSecond-10 | 100000000 | 11.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkTimeZone-10 | 14662479 | 81.60 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkTimeZoneFull-10 | 14709021 | 81.05 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkTimeZoneAbv-10 | 14627991 | 81.31 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkTimeZoneOffset-10 | 12801638 | 93.70 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkTimeZoneRegion-10 | 14714446 | 81.50 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmoji-10 | 10143355 | 118.7 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiCategory-10 | 14940362 | 81.38 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiAlias-10 | 14637633 | 82.16 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiTag-10 | 14625763 | 82.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiFlag-10 | 14470842 | 82.81 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiAnimal-10 | 14617693 | 82.11 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiFood-10 | 14477235 | 82.84 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiPlant-10 | 14502708 | 82.58 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiMusic-10 | 14580860 | 82.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkEmojiSentence-10 | 2058350 | 583.2 ns/op | 450 B/op | 7 allocs/op | +| BenchmarkError-10 | 2990094 | 399.0 ns/op | 282 B/op | 6 allocs/op | +| BenchmarkErrorObject-10 | 7202356 | 165.3 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkErrorDatabase-10 | 6873763 | 173.0 
ns/op | 63 B/op | 3 allocs/op | +| BenchmarkErrorGRPC-10 | 6894307 | 171.9 ns/op | 64 B/op | 3 allocs/op | +| BenchmarkErrorHTTP-10 | 5222224 | 229.4 ns/op | 158 B/op | 4 allocs/op | +| BenchmarkErrorHTTPClient-10 | 6889472 | 175.2 ns/op | 52 B/op | 3 allocs/op | +| BenchmarkErrorHTTPServer-10 | 6682477 | 177.1 ns/op | 59 B/op | 3 allocs/op | +| BenchmarkErrorRuntime-10 | 5930980 | 206.6 ns/op | 150 B/op | 3 allocs/op | +| BenchmarkErrorValidation-10 | 3319014 | 358.1 ns/op | 274 B/op | 5 allocs/op | +| BenchmarkFileMimeType-10 | 14300468 | 83.87 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFileExtension-10 | 14363823 | 83.48 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCusip-10 | 9081240 | 131.9 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkIsin-10 | 2656158 | 451.8 ns/op | 469 B/op | 4 allocs/op | +| BenchmarkFruit-10 | 14394954 | 83.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVegetable-10 | 14364404 | 83.47 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBreakfast-10 | 10058257 | 117.5 ns/op | 39 B/op | 1 allocs/op | +| BenchmarkLunch-10 | 10161496 | 118.3 ns/op | 41 B/op | 1 allocs/op | +| BenchmarkDinner-10 | 10117947 | 117.6 ns/op | 43 B/op | 1 allocs/op | +| BenchmarkDrink-10 | 10450734 | 113.9 ns/op | 16 B/op | 2 allocs/op | +| BenchmarkSnack-10 | 10065492 | 119.0 ns/op | 39 B/op | 1 allocs/op | +| BenchmarkDessert-10 | 10256098 | 118.2 ns/op | 38 B/op | 2 allocs/op | +| BenchmarkGamertag-10 | 2809584 | 429.9 ns/op | 81 B/op | 5 allocs/op | +| BenchmarkDice-10 | 69207811 | 17.31 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkGenerate/simple_single_function-10 | 3665461 | 325.1 ns/op | 270 B/op | 6 allocs/op | +| BenchmarkGenerate/simple_multiple_functions-10 | 944131 | 1298 ns/op | 882 B/op | 16 allocs/op | +| BenchmarkGenerate/function_with_params-10 | 1730911 | 691.4 ns/op | 1144 B/op | 16 allocs/op | +| BenchmarkGenerate/multiple_functions_with_params-10 | 537613 | 2206 ns/op | 2721 B/op | 37 allocs/op | +| BenchmarkGenerate/mixed_letters_numbers-10 | 9572214 | 125.2 
ns/op | 32 B/op | 2 allocs/op | +| BenchmarkGenerate/complex_mixed-10 | 543666 | 2158 ns/op | 2310 B/op | 32 allocs/op | +| BenchmarkGenerate/no_replacements-10 | 12480820 | 95.38 ns/op | 192 B/op | 4 allocs/op | +| BenchmarkGenerate/many_functions-10 | 233031 | 5238 ns/op | 3486 B/op | 54 allocs/op | +| BenchmarkGenerate/nested_params-10 | 1316234 | 906.3 ns/op | 1373 B/op | 23 allocs/op | +| BenchmarkGenerate/nested_complex-10 | 1000000 | 1068 ns/op | 1745 B/op | 25 allocs/op | +| BenchmarkGenerate/bio_template-10 | 733396 | 1648 ns/op | 1472 B/op | 20 allocs/op | +| BenchmarkGenerate/sentence_template-10 | 3021042 | 407.2 ns/op | 488 B/op | 8 allocs/op | +| BenchmarkGenerate/long_string_many_replacements-10 | 306052 | 3878 ns/op | 3586 B/op | 44 allocs/op | +| BenchmarkFixedWidthLookup100-10 | 3188 | 368023 ns/op | 453353 B/op | 6099 allocs/op | +| BenchmarkRegex-10 | 1000000 | 1179 ns/op | 1632 B/op | 27 allocs/op | +| BenchmarkRegexEmail-10 | 288193 | 4169 ns/op | 4084 B/op | 90 allocs/op | +| BenchmarkMap-10 | 411193 | 2769 ns/op | 1112 B/op | 16 allocs/op | +| BenchmarkHackerPhrase-10 | 492094 | 2459 ns/op | 2452 B/op | 26 allocs/op | +| BenchmarkHackerAbbreviation-10 | 14372289 | 83.53 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerAdjective-10 | 14220135 | 84.27 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerNoun-10 | 14309001 | 83.26 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackerVerb-10 | 14295102 | 84.19 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHackeringVerb-10 | 14516464 | 82.27 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkReplaceWithNumbers-10 | 2076896 | 579.9 ns/op | 264 B/op | 9 allocs/op | +| BenchmarkHipsterWord-10 | 14007025 | 85.50 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHipsterSentence-10 | 1501389 | 797.2 ns/op | 701 B/op | 11 allocs/op | +| BenchmarkHipsterParagraph-10 | 409591 | 2942 ns/op | 2625 B/op | 43 allocs/op | +| BenchmarkInputName-10 | 14354660 | 83.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSvg-10 | 311258 | 3814 ns/op 
| 8868 B/op | 52 allocs/op | +| BenchmarkID-10 | 32851885 | 36.10 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkUUID-10 | 26154878 | 45.74 ns/op | 48 B/op | 1 allocs/op | +| BenchmarkImage-10 | 87 | 13643686 ns/op | 2457691 B/op | 307202 allocs/op | +| BenchmarkImageJpeg-10 | 51 | 22983315 ns/op | 2982473 B/op | 307217 allocs/op | +| BenchmarkImagePng-10 | 20 | 55524785 ns/op | 5898540 B/op | 307239 allocs/op | +| BenchmarkDomainName-10 | 2861862 | 418.8 ns/op | 95 B/op | 5 allocs/op | +| BenchmarkDomainSuffix-10 | 14735926 | 81.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUrlSlug-10 | 2498754 | 480.0 ns/op | 81 B/op | 2 allocs/op | +| BenchmarkURL-10 | 1328094 | 904.6 ns/op | 265 B/op | 10 allocs/op | +| BenchmarkHTTPMethod-10 | 14741282 | 81.47 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkIPv4Address-10 | 7809050 | 154.2 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkIPv6Address-10 | 3424746 | 351.1 ns/op | 111 B/op | 8 allocs/op | +| BenchmarkMacAddress-10 | 4204674 | 283.4 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkHTTPStatusCode-10 | 15362457 | 78.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHTTPStatusCodeSimple-10 | 15321136 | 78.45 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLogLevel-10 | 13574922 | 88.47 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUserAgent-10 | 2095276 | 575.4 ns/op | 297 B/op | 5 allocs/op | +| BenchmarkChromeUserAgent-10 | 2614332 | 456.8 ns/op | 184 B/op | 5 allocs/op | +| BenchmarkFirefoxUserAgent-10 | 1735813 | 693.0 ns/op | 362 B/op | 6 allocs/op | +| BenchmarkSafariUserAgent-10 | 1836490 | 673.5 ns/op | 551 B/op | 7 allocs/op | +| BenchmarkOperaUserAgent-10 | 2413814 | 526.1 ns/op | 212 B/op | 5 allocs/op | +| BenchmarkJSONLookup100-10 | 1254 | 931207 ns/op | 918187 B/op | 12418 allocs/op | +| BenchmarkLanguage-10 | 14730628 | 81.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageAbbreviation-10 | 14698053 | 81.49 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLanguageBCP-10 | 14691934 | 82.75 ns/op | 0 B/op | 0 allocs/op | +| 
BenchmarkProgrammingLanguage-10 | 14665668 | 81.63 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftOre-10 | 14312521 | 83.79 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWood-10 | 14329014 | 83.67 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorTier-10 | 14261343 | 84.36 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftArmorPart-10 | 14111434 | 84.73 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeapon-10 | 14238732 | 83.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftTool-10 | 14331830 | 83.64 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftDye-10 | 14355182 | 84.94 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftFood-10 | 14337074 | 83.67 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftAnimal-10 | 14324002 | 83.56 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerJob-10 | 14366955 | 83.59 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerStation-10 | 14775799 | 81.52 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftVillagerLevel-10 | 14433811 | 83.97 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobPassive-10 | 14265412 | 83.62 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobNeutral-10 | 14339287 | 83.59 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobHostile-10 | 14349654 | 83.63 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftMobBoss-10 | 14802699 | 81.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftBiome-10 | 14390689 | 84.08 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMinecraftWeather-10 | 14607906 | 82.08 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBool-10 | 98531938 | 11.99 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkShuffleAnySlice-10 | 3848493 | 313.9 ns/op | 24 B/op | 1 allocs/op | +| BenchmarkFlipACoin-10 | 100000000 | 11.89 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMovie-10 | 5639511 | 190.9 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkMovieName-10 | 14299531 | 84.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMovieGenre-10 | 14346801 | 83.66 ns/op | 0 
B/op | 0 allocs/op | +| BenchmarkNumber-10 | 100000000 | 11.46 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUint8-10 | 99954187 | 11.95 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUint16-10 | 100000000 | 11.98 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUint32-10 | 100000000 | 11.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUint64-10 | 100000000 | 11.52 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkUintRange-10 | 100000000 | 11.40 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkInt8-10 | 97158124 | 12.11 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkInt16-10 | 98414097 | 12.44 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkInt32-10 | 100000000 | 11.53 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkInt64-10 | 100000000 | 11.53 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkIntRange-10 | 100000000 | 11.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFloat32-10 | 100000000 | 11.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFloat32Range-10 | 100000000 | 11.56 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFloat64-10 | 100000000 | 11.57 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkFloat64Range-10 | 100000000 | 11.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkShuffleInts-10 | 12829964 | 93.72 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkRandomInt-10 | 100000000 | 11.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkRandomUint-10 | 100000000 | 11.39 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHexUint-10 | 23160154 | 51.70 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkCurrency-10 | 13259960 | 87.44 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkCurrencyShort-10 | 14788378 | 81.25 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCurrencyLong-10 | 14777679 | 81.77 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPrice-10 | 100000000 | 11.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCreditCard-10 | 1269178 | 929.1 ns/op | 274 B/op | 15 allocs/op | +| BenchmarkCreditCardType-10 | 43169155 | 27.39 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkCreditCardNumber-10 | 1920832 | 623.8 ns/op | 185 B/op | 10 allocs/op | +| 
BenchmarkCreditCardExp-10 | 11255587 | 106.8 ns/op | 5 B/op | 1 allocs/op | +| BenchmarkCreditCardCvv-10 | 28679431 | 41.74 ns/op | 3 B/op | 1 allocs/op | +| BenchmarkAchRouting-10 | 10390467 | 115.0 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkAchAccount-10 | 8082610 | 148.6 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkBitcoinAddress-10 | 919413 | 1315 ns/op | 145 B/op | 3 allocs/op | +| BenchmarkBitcoinPrivateKey-10 | 1843315 | 650.5 ns/op | 184 B/op | 5 allocs/op | +| BenchmarkBankName-10 | 14444061 | 83.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBankType-10 | 14377713 | 83.89 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkName-10 | 5686338 | 210.6 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkFirstName-10 | 14735256 | 81.63 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMiddleName-10 | 14506162 | 82.00 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLastName-10 | 14682049 | 81.70 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNamePrefix-10 | 14524393 | 82.38 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNameSuffix-10 | 14671279 | 81.88 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSSN-10 | 42014109 | 28.54 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkEIN-10 | 3682759 | 294.6 ns/op | 80 B/op | 14 allocs/op | +| BenchmarkGender-10 | 100000000 | 11.99 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHobby-10 | 14439556 | 82.81 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSocialMedia-10 | 1478293 | 815.7 ns/op | 430 B/op | 11 allocs/op | +| BenchmarkBio-10 | 938670 | 1282 ns/op | 1190 B/op | 15 allocs/op | +| BenchmarkPerson-10 | 349646 | 3466 ns/op | 818 B/op | 31 allocs/op | +| BenchmarkContact-10 | 1817460 | 678.4 ns/op | 136 B/op | 6 allocs/op | +| BenchmarkPhone-10 | 9426168 | 127.4 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkPhoneFormatted-10 | 5080152 | 236.4 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkEmail-10 | 2392788 | 500.2 ns/op | 88 B/op | 4 allocs/op | +| BenchmarkTeams-10 | 2302561 | 517.0 ns/op | 672 B/op | 10 allocs/op | +| BenchmarkProduct-10 | 218653 | 5445 ns/op | 3088 B/op | 
46 allocs/op | +| BenchmarkProductName-10 | 2300114 | 522.6 ns/op | 103 B/op | 5 allocs/op | +| BenchmarkProductDescription-10 | 461224 | 2594 ns/op | 2465 B/op | 23 allocs/op | +| BenchmarkProductCategory-10 | 14565750 | 82.60 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductFeature-10 | 14421858 | 82.71 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductMaterial-10 | 14525638 | 82.69 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductUPC-10 | 1704061 | 721.7 ns/op | 96 B/op | 11 allocs/op | +| BenchmarkProductAudience-10 | 6250908 | 190.6 ns/op | 32 B/op | 1 allocs/op | +| BenchmarkProductDimension-10 | 14143437 | 84.98 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductUseCase-10 | 14313904 | 82.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductBenefit-10 | 14576446 | 82.46 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductSuffix-10 | 14219770 | 84.26 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkProductISBN-10 | 5517997 | 200.1 ns/op | 32 B/op | 3 allocs/op | +| BenchmarkSchool-10 | 4244638 | 283.1 ns/op | 34 B/op | 1 allocs/op | +| BenchmarkSong-10 | 4338772 | 277.3 ns/op | 48 B/op | 1 allocs/op | +| BenchmarkSongName-10 | 14409870 | 83.61 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSongArtist-10 | 14364440 | 83.60 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkSongGenre-10 | 14394457 | 85.02 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLetter-10 | 100000000 | 11.51 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLetterN-10 | 7059724 | 168.8 ns/op | 64 B/op | 2 allocs/op | +| BenchmarkVowel-10 | 81354326 | 14.79 ns/op | 4 B/op | 1 allocs/op | +| BenchmarkDigit-10 | 100000000 | 11.42 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkDigitN-10 | 7008775 | 168.7 ns/op | 64 B/op | 2 allocs/op | +| BenchmarkNumerify-10 | 9234306 | 129.6 ns/op | 16 B/op | 1 allocs/op | +| BenchmarkLexify-10 | 15390994 | 78.03 ns/op | 8 B/op | 1 allocs/op | +| BenchmarkShuffleStrings-10 | 14075007 | 85.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkRandomString-10 | 100000000 | 11.51 ns/op | 0 B/op | 0 
allocs/op | +| BenchmarkTemplate-10 | 3873 | 311023 ns/op | 248973 B/op | 3083 allocs/op | +| BenchmarkComment-10 | 1331073 | 903.1 ns/op | 583 B/op | 9 allocs/op | +| BenchmarkPhrase-10 | 14345750 | 84.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPhraseNoun-10 | 3605448 | 333.1 ns/op | 88 B/op | 1 allocs/op | +| BenchmarkPhraseVerb-10 | 1643992 | 733.3 ns/op | 196 B/op | 4 allocs/op | +| BenchmarkPhraseAdverb-10 | 7439331 | 161.1 ns/op | 9 B/op | 0 allocs/op | +| BenchmarkPhrasePreposition-10 | 2622950 | 455.8 ns/op | 107 B/op | 2 allocs/op | +| BenchmarkSentence-10 | 1243213 | 969.4 ns/op | 895 B/op | 12 allocs/op | +| BenchmarkParagraph-10 | 336364 | 3553 ns/op | 3322 B/op | 46 allocs/op | +| BenchmarkQuestion-10 | 1708732 | 702.1 ns/op | 549 B/op | 10 allocs/op | +| BenchmarkQuote-10 | 1638612 | 731.6 ns/op | 569 B/op | 10 allocs/op | +| BenchmarkLoremIpsumSentence-10 | 1000000 | 1064 ns/op | 219 B/op | 2 allocs/op | +| BenchmarkLoremIpsumParagraph-10 | 59647 | 20014 ns/op | 8480 B/op | 40 allocs/op | +| BenchmarkWeighted-10 | 53570133 | 22.38 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjective-10 | 7898755 | 151.5 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectiveDescriptive-10 | 14585032 | 82.31 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectiveQuantitative-10 | 14615534 | 82.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectiveProper-10 | 14826856 | 81.02 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectiveDemonstrative-10 | 14628786 | 82.13 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectivePossessive-10 | 14488378 | 82.17 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectiveInterrogative-10 | 14578668 | 82.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdjectiveIndefinite-10 | 14588925 | 82.19 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverb-10 | 7883319 | 152.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverbManner-10 | 14757386 | 83.12 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverbDegree-10 | 12683223 | 81.75 ns/op | 0 B/op | 0 allocs/op | +| 
BenchmarkAdverbPlace-10 | 14620564 | 81.22 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverbTimeDefinite-10 | 14570636 | 82.21 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverbTimeIndefinite-10 | 14585497 | 82.29 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverbFrequencyDefinite-10 | 14568910 | 83.32 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAdverbFrequencyIndefinite-10 | 14624752 | 82.14 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnective-10 | 8215815 | 146.0 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnectiveTime-10 | 14964218 | 80.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnectiveComparative-10 | 14394342 | 83.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnectiveComplaint-10 | 14189211 | 81.65 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnectiveListing-10 | 14673902 | 81.97 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnectiveCasual-10 | 14582056 | 82.15 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkConnectiveExamplify-10 | 14361904 | 83.44 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkWord-10 | 9736791 | 123.2 ns/op | 3 B/op | 0 allocs/op | +| BenchmarkInterjection-10 | 14839546 | 82.93 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkLoremIpsumWord-10 | 14265787 | 84.27 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNoun-10 | 7625799 | 157.4 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounCommon-10 | 14696373 | 81.43 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounConcrete-10 | 14775216 | 81.41 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounAbstract-10 | 14736424 | 81.55 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounCollectivePeople-10 | 14574639 | 82.05 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounCollectiveAnimal-10 | 14297954 | 82.31 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounCollectiveThing-10 | 14612716 | 82.48 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounCountable-10 | 14706775 | 81.53 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounUncountable-10 | 14912088 | 80.10 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkNounProper-10 | 10122214 | 118.6 ns/op | 
7 B/op | 0 allocs/op | +| BenchmarkNounDeterminer-10 | 15038943 | 80.18 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPreposition-10 | 9446143 | 127.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPrepositionSimple-10 | 14623304 | 82.20 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPrepositionDouble-10 | 14616758 | 82.33 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPrepositionCompound-10 | 14646828 | 82.19 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronoun-10 | 7619226 | 157.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounPersonal-10 | 15007472 | 80.06 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounObject-10 | 14734660 | 81.50 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounPossessive-10 | 14589732 | 82.16 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounReflective-10 | 14572369 | 82.18 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounIndefinite-10 | 14549400 | 81.99 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounDemonstrative-10 | 14405077 | 83.21 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounInterrogative-10 | 14522790 | 82.20 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkPronounRelative-10 | 15074666 | 80.10 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVerb-10 | 9462745 | 127.1 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVerbAction-10 | 14725183 | 81.37 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVerbTransitive-10 | 15013738 | 80.16 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVerbIntransitive-10 | 14609581 | 82.23 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVerbLinking-10 | 14814585 | 81.20 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkVerbHelping-10 | 14679265 | 81.18 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkXMLLookup100-10 | 1240 | 992659 ns/op | 978256 B/op | 11687 allocs/op | \ No newline at end of file diff --git a/vendor/github.com/brianvoe/gofakeit/v7/README.md b/vendor/github.com/brianvoe/gofakeit/v7/README.md index 246bb9c0..964f8c44 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/README.md +++ b/vendor/github.com/brianvoe/gofakeit/v7/README.md @@ -4,9 
+4,13 @@ Random data generator written in go -[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/G2G0R5EJT) +## Support -Buy Me A Coffee +[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/G2G0R5EJT) Buy Me A Coffee + +## Merch + +[![Merch](https://raw.githubusercontent.com/brianvoe/gofakeit/master/merch.png)](https://gofakeit-buy-shop.fourthwall.com) ## Features @@ -41,17 +45,17 @@ go get github.com/brianvoe/gofakeit/v7 ```go import "github.com/brianvoe/gofakeit/v7" -gofakeit.Name() // Markus Moen -gofakeit.Email() // alaynawuckert@kozey.biz -gofakeit.Phone() // (570)245-7485 -gofakeit.BS() // front-end -gofakeit.BeerName() // Duvel -gofakeit.Color() // MediumOrchid -gofakeit.Company() // Moen, Pagac and Wuckert -gofakeit.CreditCardNumber() // 4287271570245748 -gofakeit.HackerPhrase() // Connecting the array won't do anything, we need to generate the haptic COM driver! -gofakeit.JobTitle() // Director -gofakeit.CurrencyShort() // USD +gofakeit.Name() // Markus Moen +gofakeit.Email() // alaynawuckert@kozey.biz +gofakeit.Phone() // (570)245-7485 +gofakeit.BS() // front-end +gofakeit.BeerName() // Duvel +gofakeit.Color() // MediumOrchid +gofakeit.Company() // Moen, Pagac and Wuckert +gofakeit.CreditCardNumber(nil) // 4287271570245748 +gofakeit.HackerPhrase() // Connecting the array won't do anything, we need to generate the haptic COM driver! 
+gofakeit.JobTitle() // Director +gofakeit.CurrencyShort() // USD ``` [See full list of functions](#functions) @@ -91,7 +95,7 @@ import ( faker := gofakeit.New(0) // NewFaker takes in a source and whether or not it should be thread safe -faker := gofakeit.NewFaker(source rand.Source, threadSafe bool) +faker := gofakeit.NewFaker(src rand.Source, lock bool) // PCG Pseudo faker := gofakeit.NewFaker(rand.NewPCG(11, 11), true) @@ -142,7 +146,7 @@ type Foo struct { Int int Pointer *int Name string `fake:"{firstname}"` // Any available function all lowercase - Sentence string `fake:"{sentence:3}"` // Can call with parameters + Sentence string `fake:"{sentence}"` RandStr string `fake:"{randomstring:[hello,world]}"` Number string `fake:"{number:1,10}"` // Comma separated for multiple values Regex string `fake:"{regex:[abcdef]{5}}"` // Generate string from regex @@ -171,7 +175,7 @@ fmt.Println(f.Int) // -7825289004089916589 fmt.Println(*f.Pointer) // -343806609094473732 fmt.Println(f.Name) // fred fmt.Println(f.Sentence) // Record river mind. 
-fmt.Println(fStr) // world +fmt.Println(f.RandStr) // world fmt.Println(f.Number) // 4 fmt.Println(f.Regex) // cbdfc fmt.Println(f.Map) // map[PxLIo:52 lxwnqhqc:846] @@ -210,7 +214,7 @@ func (c *Friend) Fake(f *gofakeit.Faker) (any, error) { type Age time.Time func (c *Age) Fake(f *gofakeit.Faker) (any, error) { - return f.DateRange(time.Now().AddDate(-100, 0, 0), time.Now().AddDate(-18, 0, 0)), nil + return Age(f.DateRange(time.Now().AddDate(-100, 0, 0), time.Now().AddDate(-18, 0, 0))), nil } // This is the struct that we cannot modify to add struct tags @@ -221,8 +225,8 @@ type User struct { var u User gofakeit.Struct(&u) -fmt.Printf("%s", f.Name) // billy -fmt.Printf("%s", f.Age) // 1990-12-07 04:14:25.685339029 +0000 UTC +fmt.Println(u.Name) // billy +fmt.Println(time.Time(*u.Age)) // 1990-12-07 04:14:25.685339029 +0000 UTC ``` ## Custom Functions @@ -240,7 +244,7 @@ gofakeit.AddFuncLookup("friendname", gofakeit.Info{ Description: "Random friend name", Example: "bill", Output: "string", - Generate: func(f *Faker, m *gofakeit.MapParams, info *gofakeit.Info) (any, error) { + Generate: func(f *gofakeit.Faker, m *gofakeit.MapParams, info *gofakeit.Info) (any, error) { return f.RandomString([]string{"bill", "bob", "sally"}), nil }, }) @@ -254,7 +258,7 @@ gofakeit.AddFuncLookup("jumbleword", gofakeit.Info{ Params: []gofakeit.Param{ {Field: "word", Type: "string", Description: "Word you want to jumble"}, }, - Generate: func(f *Faker, m *gofakeit.MapParams, info *gofakeit.Info) (any, error) { + Generate: func(f *gofakeit.Faker, m *gofakeit.MapParams, info *gofakeit.Info) (any, error) { word, err := info.GetString(m, "word") if err != nil { return nil, err @@ -273,8 +277,8 @@ type Foo struct { var f Foo gofakeit.Struct(&f) -fmt.Printf("%s", f.FriendName) // bill -fmt.Printf("%s", f.JumbleWord) // loredlowlh +fmt.Println(f.FriendName) // bill +fmt.Println(f.JumbleWord) // loredlowlh ``` ## Templates @@ -328,17 +332,17 @@ func main() { {{RandomString (SliceString "Warm 
regards" "Best wishes" "Sincerely")}} {{$person:=Person}} {{$person.FirstName}} {{$person.LastName}} - {{$person.Email}} - {{$person.Phone}} + {{$person.Contact.Email}} + {{$person.Contact.Phone}} ` - value, err := gofakeit.Template(template, &TemplateOptions{Data: 5}) + value, err := gofakeit.Template(template, &gofakeit.TemplateOptions{Data: 5}) if err != nil { fmt.Println(err) } - fmt.Println(string(value)) + fmt.Println(value) } ``` @@ -353,6 +357,7 @@ Greetings! Quia voluptatem voluptatem voluptatem. Quia voluptatem voluptatem voluptatem. Quia voluptatem voluptatem voluptatem. Warm regards + Kaitlyn Krajcik kaitlynkrajcik@krajcik 570-245-7485 @@ -385,8 +390,14 @@ EmailText(co *EmailOptions) (string, error) FixedWidth(co *FixedWidthOptions) (string, error) ``` -### Product +### ID +```go +ID() string +UUID() string +``` + +### Product ```go Product() *ProductInfo @@ -401,6 +412,7 @@ ProductDimension() string ProductUseCase() string ProductBenefit() string ProductSuffix() string +ProductISBN(opts *ISBNOptions) string ``` @@ -415,7 +427,10 @@ FirstName() string MiddleName() string LastName() string Gender() string +Age() int +Ethnicity() string SSN() string +EIN() string Hobby() string Contact() *ContactInfo Email() string @@ -455,6 +470,7 @@ StreetName() string StreetNumber() string StreetPrefix() string StreetSuffix() string +Unit() string Zip() string Latitude() float64 LatitudeInRange(min, max float64) (float64, error) @@ -561,9 +577,9 @@ ConnectiveExamplify() string // Words Word() string -// Sentences -Sentence(wordCount int) string -Paragraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string +// Text +Sentence() string +Paragraph() string LoremIpsumWord() string LoremIpsumSentence(wordCount int) string LoremIpsumParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string @@ -588,7 +604,6 @@ Dessert() string ```go Bool() bool -UUID() string Weighted(options []any, weights []float32) (any, error) 
FlipACoin() string RandomMapKey(mapI any) any @@ -617,6 +632,7 @@ ImagePng(width int, height int) []byte ```go URL() string +UrlSlug(words int) string DomainName() string DomainSuffix() string IPv4Address() string @@ -632,6 +648,7 @@ ChromeUserAgent() string FirefoxUserAgent() string OperaUserAgent() string SafariUserAgent() string +APIUserAgent() string ``` ### HTML @@ -680,6 +697,8 @@ AchRouting() string AchAccount() string BitcoinAddress() string BitcoinPrivateKey() string +BankName() string +BankType() string ``` ### Finance @@ -719,8 +738,8 @@ HackerVerb() string ```go HipsterWord() string -HipsterSentence(wordCount int) string -HipsterParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string +HipsterSentence() string +HipsterParagraph() string ``` ### App @@ -747,10 +766,29 @@ Bird() string ```go Emoji() string -EmojiDescription() string EmojiCategory() string EmojiAlias() string EmojiTag() string +EmojiFlag() string +EmojiAnimal() string +EmojiFood() string +EmojiPlant() string +EmojiMusic() string +EmojiVehicle() string +EmojiSport() string +EmojiFace() string +EmojiHand() string +EmojiClothing() string +EmojiLandmark() string +EmojiElectronics() string +EmojiGame() string +EmojiTools() string +EmojiWeather() string +EmojiJob() string +EmojiPerson() string +EmojiGesture() string +EmojiCostume() string +EmojiSentence() string ``` ### Language @@ -759,7 +797,6 @@ EmojiTag() string Language() string LanguageAbbreviation() string ProgrammingLanguage() string -ProgrammingLanguageBest() string ``` ### Number @@ -857,7 +894,6 @@ ErrorGRPC() error ErrorHTTP() error ErrorHTTPClient() error ErrorHTTPServer() error -ErrorInput() error ErrorRuntime() error ``` @@ -866,3 +902,12 @@ ErrorRuntime() error ```go School() string ``` + +### Song + +```go +Song() *SongInfo +SongName() string +SongArtist() string +SongGenre() string +``` \ No newline at end of file diff --git a/vendor/github.com/brianvoe/gofakeit/v7/address.go 
b/vendor/github.com/brianvoe/gofakeit/v7/address.go index f8e4e7b8..c894e591 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/address.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/address.go @@ -9,6 +9,7 @@ import ( type AddressInfo struct { Address string `json:"address" xml:"address"` Street string `json:"street" xml:"street"` + Unit string `json:"unit" xml:"unit"` City string `json:"city" xml:"city"` State string `json:"state" xml:"state"` Zip string `json:"zip" xml:"zip"` @@ -29,9 +30,20 @@ func address(f *Faker) *AddressInfo { state := state(f) zip := zip(f) + // 30% chance to include a unit in the address + var unitStr string + var unitField string + if randIntRange(f, 1, 10) <= 3 { + unitStr = ", " + unit(f) + unitField = unit(f) + } + + addressStr := street + unitStr + ", " + city + ", " + state + " " + zip + return &AddressInfo{ - Address: street + ", " + city + ", " + state + " " + zip, + Address: addressStr, Street: street, + Unit: unitField, City: city, State: state, Zip: zip, @@ -93,6 +105,18 @@ func (f *Faker) StreetSuffix() string { return streetSuffix(f) } func streetSuffix(f *Faker) string { return getRandValue(f, []string{"address", "street_suffix"}) } +// Unit will generate a random unit string +func Unit() string { return unit(GlobalFaker) } + +// Unit will generate a random unit string +func (f *Faker) Unit() string { return unit(f) } + +func unit(f *Faker) string { + unitType := getRandValue(f, []string{"address", "unit"}) + unitNumber := replaceWithNumbers(f, "###") + return unitType + " " + unitNumber +} + // City will generate a random city string func City() string { return city(GlobalFaker) } @@ -199,8 +223,9 @@ func addAddressLookup() { Category: "address", Description: "Residential location including street, city, state, country and postal code", Example: `{ - "address": "364 Unionsville, Norfolk, Ohio 99536", + "address": "364 Unionsville, Apt 123, Norfolk, Ohio 99536", "street": "364 Unionsville", + "apartment": "Apt 123", 
"city": "Norfolk", "state": "Ohio", "zip": "99536", @@ -210,6 +235,8 @@ func addAddressLookup() { }`, Output: "map[string]any", ContentType: "application/json", + Aliases: []string{"full address", "residential address", "mailing address", "street address", "home address"}, + Keywords: []string{"residential", "location", "street", "city", "state", "country", "postal", "code", "mailing", "home", "house", "apartment", "zipcode", "coordinates"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return address(f), nil }, @@ -221,6 +248,8 @@ func addAddressLookup() { Description: "Part of a country with significant population, often a central hub for culture and commerce", Example: "Marcelside", Output: "string", + Aliases: []string{"city name", "urban area", "municipality name", "town name", "metropolitan area"}, + Keywords: []string{"town", "municipality", "urban", "area", "population", "hub", "culture", "commerce", "metropolitan", "settlement", "community", "district"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return city(f), nil }, @@ -232,6 +261,8 @@ func addAddressLookup() { Description: "Nation with its own government and defined territory", Example: "United States of America", Output: "string", + Aliases: []string{"country name", "nation name", "sovereign state", "national territory", "independent country"}, + Keywords: []string{"nation", "government", "territory", "sovereign", "independent", "state", "republic", "kingdom", "empire", "federation", "commonwealth"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return country(f), nil }, @@ -243,6 +274,8 @@ func addAddressLookup() { Description: "Shortened 2-letter form of a country's name", Example: "US", Output: "string", + Aliases: []string{"country code", "iso alpha-2", "iso3166-1 alpha-2", "two-letter country", "country short code"}, + Keywords: []string{"country", "abbreviation", "shortened", "2-letter", "nation", "iso", "code", "alpha-2", 
"iso3166-1", "standard", "international", "identifier"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return countryAbr(f), nil }, @@ -254,6 +287,8 @@ func addAddressLookup() { Description: "Governmental division within a country, often having its own laws and government", Example: "Illinois", Output: "string", + Aliases: []string{"state name", "province name", "region name", "administrative division", "territory name"}, + Keywords: []string{"province", "region", "division", "governmental", "territory", "area", "laws", "government", "administrative", "subdivision", "district", "county"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return state(f), nil }, @@ -262,9 +297,11 @@ func addAddressLookup() { AddFuncLookup("stateabr", Info{ Display: "State Abbreviation", Category: "address", - Description: "Shortened 2-letter form of a country's state", + Description: "Shortened 2-letter form of a state or province", Example: "IL", Output: "string", + Aliases: []string{"state code", "province code", "region code", "usps code", "iso3166-2 code"}, + Keywords: []string{"state", "abbreviation", "shortened", "2-letter", "region", "province", "country", "code", "usps", "iso3166-2", "identifier"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return stateAbr(f), nil }, @@ -276,6 +313,8 @@ func addAddressLookup() { Description: "Public road in a city or town, typically with houses and buildings on each side", Example: "364 East Rapidsborough", Output: "string", + Aliases: []string{"street address", "shipping address", "billing address", "mailing address", "address line 1", "line 1", "road address", "avenue address", "drive address", "thoroughfare address"}, + Keywords: []string{"address", "road", "avenue", "drive", "lane", "way", "public", "thoroughfare", "boulevard", "court", "place", "circle", "terrace", "highway"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return street(f), nil }, @@ -287,6 
+326,8 @@ func addAddressLookup() { Description: "Name given to a specific road or street", Example: "View", Output: "string", + Aliases: []string{"street title", "road name", "avenue name", "drive name", "thoroughfare name"}, + Keywords: []string{"street", "name", "road", "avenue", "drive", "lane", "way", "thoroughfare", "specific", "title", "designation", "label", "identifier"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return streetName(f), nil }, @@ -298,6 +339,8 @@ func addAddressLookup() { Description: "Numerical identifier assigned to a street", Example: "13645", Output: "string", + Aliases: []string{"house number", "building number", "address number", "street identifier", "numerical address"}, + Keywords: []string{"street", "number", "identifier", "numerical", "address", "location", "building", "assigned", "house", "digit", "numeric", "sequence", "position"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return streetNumber(f), nil }, @@ -306,9 +349,11 @@ func addAddressLookup() { AddFuncLookup("streetprefix", Info{ Display: "Street Prefix", Category: "address", - Description: "Directional or descriptive term preceding a street name, like 'East' or 'Main'", - Example: "Lake", + Description: "Directional or descriptive term preceding a street name (e.g., 'East', 'N')", + Example: "East", Output: "string", + Aliases: []string{"directional prefix", "street prefix", "name prefix", "road prefix", "thoroughfare prefix"}, + Keywords: []string{"street", "prefix", "directional", "north", "south", "east", "west", "n", "s", "e", "w"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return streetPrefix(f), nil }, @@ -317,20 +362,37 @@ func addAddressLookup() { AddFuncLookup("streetsuffix", Info{ Display: "Street Suffix", Category: "address", - Description: "Designation at the end of a street name indicating type, like 'Avenue' or 'Street'", - Example: "land", + Description: "Designation at the end of a 
street name indicating type (e.g., 'Ave', 'St')", + Example: "Ave", Output: "string", + Aliases: []string{"street type", "road type", "avenue suffix", "thoroughfare suffix", "street ending"}, + Keywords: []string{"street", "suffix", "designation", "type", "ave", "st", "rd", "dr", "ln", "blvd", "ct", "pl", "cir", "ter", "hwy"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return streetSuffix(f), nil }, }) + AddFuncLookup("unit", Info{ + Display: "Unit", + Category: "address", + Description: "Unit identifier within a building, such as apartment number, suite, or office", + Example: "Apt 123", + Output: "string", + Aliases: []string{"apartment unit", "suite number", "office number", "building unit", "room number", "address line 2", "line 2"}, + Keywords: []string{"apartment", "suite", "office", "identifier", "building", "number", "within", "room", "floor", "level", "section", "compartment"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return unit(f), nil + }, + }) + AddFuncLookup("zip", Info{ Display: "Zip", Category: "address", Description: "Numerical code for postal address sorting, specific to a geographic area", Example: "13645", Output: "string", + Aliases: []string{"zip code", "postal code", "mail code", "delivery code"}, + Keywords: []string{"postal", "postcode", "code", "address", "sorting", "geographic", "area", "numerical", "mailing", "delivery", "zone", "district", "region", "identifier"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return zip(f), nil }, @@ -342,6 +404,8 @@ func addAddressLookup() { Description: "Geographic coordinate specifying north-south position on Earth's surface", Example: "-73.534056", Output: "float", + Aliases: []string{"lat coordinate", "north-south coordinate", "geographic latitude", "earth latitude", "position latitude"}, + Keywords: []string{"lat", "coordinate", "north-south", "degrees", "gps", "wgs84", "geodesy", "parallel", "equator", "pole"}, Generate: 
func(f *Faker, m *MapParams, info *Info) (any, error) { return latitude(f), nil }, @@ -353,6 +417,8 @@ func addAddressLookup() { Description: "Latitude number between the given range (default min=0, max=90)", Example: "22.921026", Output: "float", + Aliases: []string{"latitude bounds", "lat range", "north-south range", "geographic bounds", "coordinate range"}, + Keywords: []string{"lat", "range", "min", "max", "degrees", "gps", "wgs84", "bounds", "interval"}, Params: []Param{ {Field: "min", Display: "Min", Type: "float", Default: "0", Description: "Minimum range"}, {Field: "max", Display: "Max", Type: "float", Default: "90", Description: "Maximum range"}, @@ -362,18 +428,11 @@ func addAddressLookup() { if err != nil { return nil, err } - max, err := info.GetFloat64(m, "max") if err != nil { return nil, err } - - rangeOut, err := latitudeInRange(f, min, max) - if err != nil { - return nil, err - } - - return rangeOut, nil + return latitudeInRange(f, min, max) }, }) @@ -383,6 +442,8 @@ func addAddressLookup() { Description: "Geographic coordinate indicating east-west position on Earth's surface", Example: "-147.068112", Output: "float", + Aliases: []string{"long coordinate", "east-west coordinate", "geographic longitude", "earth longitude", "position longitude"}, + Keywords: []string{"lon", "coordinate", "east-west", "degrees", "gps", "wgs84", "geodesy", "meridian", "idl"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return longitude(f), nil }, @@ -394,6 +455,8 @@ func addAddressLookup() { Description: "Longitude number between the given range (default min=0, max=180)", Example: "-8.170450", Output: "float", + Aliases: []string{"longitude bounds", "long range", "east-west range", "geographic bounds", "coordinate range"}, + Keywords: []string{"longitude", "lon", "range", "min", "max", "degrees", "gps", "wgs84", "bounds", "interval"}, Params: []Param{ {Field: "min", Display: "Min", Type: "float", Default: "0", Description: "Minimum range"}, 
{Field: "max", Display: "Max", Type: "float", Default: "180", Description: "Maximum range"}, @@ -403,18 +466,12 @@ func addAddressLookup() { if err != nil { return nil, err } - max, err := info.GetFloat64(m, "max") if err != nil { return nil, err } - - rangeOut, err := longitudeInRange(f, min, max) - if err != nil { - return nil, err - } - - return rangeOut, nil + return longitudeInRange(f, min, max) }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/airline.go b/vendor/github.com/brianvoe/gofakeit/v7/airline.go new file mode 100644 index 00000000..9446e497 --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/airline.go @@ -0,0 +1,176 @@ +package gofakeit + +import ( + "fmt" + "strings" +) + +// AirlineAircraftType will generate a random aircraft type +func AirlineAircraftType() string { return airlineAircraftType(GlobalFaker) } + +// AirlineAircraftType will generate a random aircraft type +func (f *Faker) AirlineAircraftType() string { return airlineAircraftType(f) } + +func airlineAircraftType(f *Faker) string { + return getRandValue(f, []string{"airline", "aircraft_type"}) +} + +// AirlineAirplane will generate a random airplane model +func AirlineAirplane() string { return airlineAirplane(GlobalFaker) } + +// AirlineAirplane will generate a random airplane model +func (f *Faker) AirlineAirplane() string { return airlineAirplane(f) } + +func airlineAirplane(f *Faker) string { + return getRandValue(f, []string{"airline", "airplane"}) +} + +// AirlineAirport will generate a random airport name +func AirlineAirport() string { return airlineAirport(GlobalFaker) } + +// AirlineAirport will generate a random airport name +func (f *Faker) AirlineAirport() string { return airlineAirport(f) } + +func airlineAirport(f *Faker) string { + return getRandValue(f, []string{"airline", "airport"}) +} + +// AirlineAirportIATA will generate a random airport IATA code +func AirlineAirportIATA() string { return airlineAirportIATA(GlobalFaker) } + +// 
AirlineAirportIATA will generate a random airport IATA code +func (f *Faker) AirlineAirportIATA() string { return airlineAirportIATA(f) } + +func airlineAirportIATA(f *Faker) string { + return getRandValue(f, []string{"airline", "iata"}) +} + +// AirlineFlightNumber will generate a random flight number +func AirlineFlightNumber() string { return airlineFlightNumber(GlobalFaker) } + +// AirlineFlightNumber will generate a random flight number +func (f *Faker) AirlineFlightNumber() string { return airlineFlightNumber(f) } + +func airlineFlightNumber(f *Faker) string { + // Generate a 2-letter airline code followed by 1-4 digit flight number + return fmt.Sprintf("%s%d", strings.ToUpper(letterN(f, 2)), f.Number(1, 9999)) +} + +// AirlineRecordLocator will generate a random record locator (booking reference) +func AirlineRecordLocator() string { return airlineRecordLocator(GlobalFaker) } + +// AirlineRecordLocator will generate a random record locator (booking reference) +func (f *Faker) AirlineRecordLocator() string { return airlineRecordLocator(f) } + +func airlineRecordLocator(f *Faker) string { + // Generate a 6-character uppercase alphanumeric record locator + return strings.ToUpper(letterN(f, 6)) +} + +// AirlineSeat will generate a random seat assignment +func AirlineSeat() string { return airlineSeat(GlobalFaker) } + +// AirlineSeat will generate a random seat assignment +func (f *Faker) AirlineSeat() string { return airlineSeat(f) } + +func airlineSeat(f *Faker) string { + // Generate seat like "12A", "23F", etc. 
+ // Row: 1-60, Seat: A-K (excluding I) + row := f.Number(1, 60) + seats := []string{"A", "B", "C", "D", "E", "F", "G", "H", "J", "K"} + seat := seats[f.Number(0, len(seats)-1)] + return fmt.Sprintf("%d%s", row, seat) +} + +func addAirlineLookup() { + AddFuncLookup("airlineaircrafttype", Info{ + Display: "Airline Aircraft Type", + Category: "airline", + Description: "Distinct category that defines the particular model or series of an aircraft", + Example: "narrowbody", + Output: "string", + Aliases: []string{"aircraft category", "plane type", "airplane classification"}, + Keywords: []string{"airline", "aircraft", "type", "category", "narrowbody", "widebody", "regional", "plane", "airplane"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return airlineAircraftType(f), nil + }, + }) + + AddFuncLookup("airlineairplane", Info{ + Display: "Airline Airplane", + Category: "airline", + Description: "Specific model and manufacturer of an aircraft used for air travel", + Example: "Airbus A320", + Output: "string", + Aliases: []string{"aircraft model", "plane model", "airplane name"}, + Keywords: []string{"airline", "airplane", "aircraft", "model", "airbus", "boeing", "embraer", "bombardier", "manufacturer"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return airlineAirplane(f), nil + }, + }) + + AddFuncLookup("airlineairport", Info{ + Display: "Airline Airport", + Category: "airline", + Description: "Facility where aircraft take off and land, including terminals and runways", + Example: "Hartsfield-Jackson Atlanta International Airport", + Output: "string", + Aliases: []string{"airport name", "aerodrome", "airfield"}, + Keywords: []string{"airline", "airport", "facility", "terminal", "runway", "international", "travel", "aviation"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return airlineAirport(f), nil + }, + }) + + AddFuncLookup("airlineairportiata", Info{ + Display: "Airline Airport IATA", + 
Category: "airline", + Description: "Three-letter code assigned to airports by the International Air Transport Association", + Example: "ATL", + Output: "string", + Aliases: []string{"airport code", "iata code", "airport abbreviation"}, + Keywords: []string{"airline", "airport", "iata", "code", "three-letter", "abbreviation", "international", "aviation"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return airlineAirportIATA(f), nil + }, + }) + + AddFuncLookup("airlineflightnumber", Info{ + Display: "Airline Flight Number", + Category: "airline", + Description: "Unique identifier for a specific flight operated by an airline", + Example: "AA1234", + Output: "string", + Aliases: []string{"flight code", "flight identifier", "flight designation"}, + Keywords: []string{"airline", "flight", "number", "identifier", "code", "designation", "aviation", "travel"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return airlineFlightNumber(f), nil + }, + }) + + AddFuncLookup("airlinerecordlocator", Info{ + Display: "Airline Record Locator", + Category: "airline", + Description: "Unique alphanumeric code used to identify and retrieve a flight booking", + Example: "ABCDEF", + Output: "string", + Aliases: []string{"booking reference", "confirmation code", "reservation code", "pnr"}, + Keywords: []string{"airline", "record", "locator", "booking", "reference", "confirmation", "reservation", "code", "alphanumeric"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return airlineRecordLocator(f), nil + }, + }) + + AddFuncLookup("airlineseat", Info{ + Display: "Airline Seat", + Category: "airline", + Description: "Designated location within an aircraft assigned to a passenger", + Example: "12A", + Output: "string", + Aliases: []string{"seat assignment", "seat number", "seat location"}, + Keywords: []string{"airline", "seat", "assignment", "location", "passenger", "aircraft", "row", "position"}, + Generate: func(f 
*Faker, m *MapParams, info *Info) (any, error) { + return airlineSeat(f), nil + }, + }) +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/animal.go b/vendor/github.com/brianvoe/gofakeit/v7/animal.go index b70438b6..91615077 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/animal.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/animal.go @@ -105,6 +105,8 @@ func addAnimalLookup() { Description: "Affectionate nickname given to a pet", Example: "Ozzy Pawsborne", Output: "string", + Aliases: []string{"pet nickname", "animal name", "companion name", "friendly name", "affectionate name"}, + Keywords: []string{"pet", "name", "nickname", "affectionate", "animal", "companion", "friendly", "cute", "funny", "playful", "loving", "adorable"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return petName(f), nil }, @@ -116,6 +118,8 @@ func addAnimalLookup() { Description: "Living creature with the ability to move, eat, and interact with its environment", Example: "elk", Output: "string", + Aliases: []string{"wild animal", "living creature", "wildlife species", "animal species", "creature name"}, + Keywords: []string{"creature", "living", "move", "eat", "environment", "wildlife", "species", "fauna", "beast", "organism", "vertebrate", "invertebrate"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return animal(f), nil }, @@ -127,6 +131,8 @@ func addAnimalLookup() { Description: "Type of animal, such as mammals, birds, reptiles, etc.", Example: "amphibians", Output: "string", + Aliases: []string{"animal classification", "species type", "taxonomic group", "animal category", "biological class"}, + Keywords: []string{"animal", "type", "mammals", "birds", "reptiles", "amphibians", "classification", "taxonomy", "phylum", "class", "order", "family", "genus", "species"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return animalType(f), nil }, @@ -138,6 +144,8 @@ func addAnimalLookup() { Description: "Animal name commonly 
found on a farm", Example: "Chicken", Output: "string", + Aliases: []string{"livestock animal", "barnyard animal", "agricultural animal", "domestic animal", "farm livestock"}, + Keywords: []string{"farm", "animal", "livestock", "domestic", "agriculture", "commonly", "cattle", "barnyard", "herd", "poultry", "swine", "sheep", "goat", "horse", "pig"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return farmAnimal(f), nil }, @@ -149,6 +157,8 @@ func addAnimalLookup() { Description: "Various breeds that define different cats", Example: "Chausie", Output: "string", + Aliases: []string{"cat breed", "feline breed", "domestic cat", "pet cat", "kitty breed"}, + Keywords: []string{"breed", "feline", "pet", "domestic", "various", "persian", "siamese", "maine", "coon", "tabby", "calico", "tuxedo", "kitten"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return cat(f), nil }, @@ -160,6 +170,8 @@ func addAnimalLookup() { Description: "Various breeds that define different dogs", Example: "Norwich Terrier", Output: "string", + Aliases: []string{"dog breed", "canine breed", "domestic dog", "pet dog", "fido breed"}, + Keywords: []string{"breed", "canine", "pet", "domestic", "various", "labrador", "retriever", "terrier", "shepherd", "bulldog", "poodle", "puppy", "hound"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return dog(f), nil }, @@ -171,6 +183,8 @@ func addAnimalLookup() { Description: "Distinct species of birds", Example: "goose", Output: "string", + Aliases: []string{"bird species", "avian species", "feathered animal", "winged creature", "bird type"}, + Keywords: []string{"species", "avian", "feather", "wing", "distinct", "sparrow", "eagle", "hawk", "owl", "duck", "goose", "parrot", "finch", "robin"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bird(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/app.go b/vendor/github.com/brianvoe/gofakeit/v7/app.go index 
9c66cd5c..44c414f9 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/app.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/app.go @@ -67,6 +67,8 @@ func addAppLookup() { Description: "Software program designed for a specific purpose or task on a computer or mobile device", Example: "Parkrespond", Output: "string", + Aliases: []string{"software name", "application name", "mobile app name", "program title", "app title"}, + Keywords: []string{"app", "name", "software", "program", "application", "mobile", "device", "computer", "ios", "android", "desktop", "web", "platform", "title"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return appName(f), nil }, @@ -78,6 +80,8 @@ func addAppLookup() { Description: "Particular release of an application in Semantic Versioning format", Example: "1.12.14", Output: "string", + Aliases: []string{"semantic version", "app release", "software version", "version number", "release version"}, + Keywords: []string{"app", "version", "release", "semantic", "versioning", "application", "major", "minor", "patch", "build", "number", "format", "tag"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return appVersion(f), nil }, @@ -89,6 +93,8 @@ func addAppLookup() { Description: "Person or group creating and developing an application", Example: "Qado Energy, Inc.", Output: "string", + Aliases: []string{"app developer", "software author", "application creator", "program developer", "app creator"}, + Keywords: []string{"app", "author", "developer", "creator", "person", "company", "group", "creating", "programmer", "coder", "engineer", "team", "organization", "studio", "publisher"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return appAuthor(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/auth.go b/vendor/github.com/brianvoe/gofakeit/v7/auth.go index 53afa3e5..e89fa751 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/auth.go +++ 
b/vendor/github.com/brianvoe/gofakeit/v7/auth.go @@ -11,7 +11,13 @@ func (f *Faker) Username() string { } func username(f *Faker) string { - return getRandValue(f, []string{"person", "last"}) + replaceWithNumbers(f, "####") + username := getRandValue(f, []string{"auth", "username"}) + username, err := generate(f, username) + if err != nil { + return username // fallback to raw template if generation fails + } + + return username } // Password will generate a random password. @@ -32,55 +38,91 @@ func password(f *Faker, lower bool, upper bool, numeric bool, special bool, spac num = 5 } - // Setup weights - items := make([]any, 0) - weights := make([]float32, 0) - if lower { - items = append(items, "l") - weights = append(weights, 4) - } - if upper { - items = append(items, "u") - weights = append(weights, 4) - } - if numeric { - items = append(items, "n") - weights = append(weights, 3) - } - if special { - items = append(items, "e") - weights = append(weights, 2) - } - if space { - items = append(items, "a") - weights = append(weights, 1) + type charGroup struct { + chars string + weight int + isSpace bool } + defaultNonSpace := [...]charGroup{ + {chars: lowerStr, weight: 4}, + {chars: upperStr, weight: 4}, + {chars: numericStr, weight: 3}, + } + const defaultNonSpaceWeight = 4 + 4 + 3 + + var ( + activeBuf [5]charGroup + nonSpaceBuf [5]charGroup + active []charGroup + totalWeight int + nonSpace []charGroup + nonSpaceWeight int + ) + + appendGroup := func(enabled bool, chars string, weight int, isSpace bool) { + if !enabled { + return + } + active = append(active, charGroup{chars: chars, weight: weight, isSpace: isSpace}) + totalWeight += weight + if !isSpace { + nonSpace = append(nonSpace, charGroup{chars: chars, weight: weight}) + nonSpaceWeight += weight + } + } + + active = activeBuf[:0] + nonSpace = nonSpaceBuf[:0] + appendGroup(lower, lowerStr, 4, false) + appendGroup(upper, upperStr, 4, false) + appendGroup(numeric, numericStr, 3, false) + appendGroup(special, 
specialSafeStr, 2, false) + appendGroup(space, spaceStr, 1, true) + // If no items are selected then default to lower, upper, numeric - if len(items) == 0 { - items = append(items, "l", "u", "n") - weights = append(weights, 4, 4, 3) + if len(active) == 0 { + active = defaultNonSpace[:] + totalWeight = defaultNonSpaceWeight + nonSpace = active + nonSpaceWeight = totalWeight + } else if nonSpaceWeight == 0 { + // No non-space characters were added (only spaces); fall back to defaults. + nonSpace = defaultNonSpace[:] + nonSpaceWeight = defaultNonSpaceWeight + } + + draw := func(groups []charGroup, total int) byte { + if total <= 0 { + groups = defaultNonSpace[:] + total = defaultNonSpaceWeight + } + + r := f.IntN(total) + for _, g := range groups { + if r < g.weight { + return g.chars[f.IntN(len(g.chars))] + } + r -= g.weight + } + + // Should never be reached, but fall back to the last group just in case. + g := groups[len(groups)-1] + return g.chars[f.IntN(len(g.chars))] } // Create byte slice b := make([]byte, num) - for i := 0; i <= num-1; i++ { - // Run weighted - weight, _ := weighted(f, items, weights) + // Guarantee at least one character from each enabled non-space group so + // the password always satisfies the requested character-set criteria. + for i, g := range nonSpace { + b[i] = g.chars[f.IntN(len(g.chars))] + } - switch weight.(string) { - case "l": - b[i] = lowerStr[f.Int64()%int64(len(lowerStr))] - case "u": - b[i] = upperStr[f.Int64()%int64(len(upperStr))] - case "n": - b[i] = numericStr[f.Int64()%int64(len(numericStr))] - case "e": - b[i] = specialSafeStr[f.Int64()%int64(len(specialSafeStr))] - case "a": - b[i] = spaceStr[f.Int64()%int64(len(spaceStr))] - } + // Fill the remaining positions randomly from all active groups. 
+ for i := len(nonSpace); i < num; i++ { + b[i] = draw(active, totalWeight) } // Shuffle bytes @@ -91,10 +133,10 @@ func password(f *Faker, lower bool, upper bool, numeric bool, special bool, spac // Replace first or last character if it's a space, and other options are available if b[0] == ' ' { - b[0] = password(f, lower, upper, numeric, special, false, 1)[0] + b[0] = draw(nonSpace, nonSpaceWeight) } if b[len(b)-1] == ' ' { - b[len(b)-1] = password(f, lower, upper, numeric, special, false, 1)[0] + b[len(b)-1] = draw(nonSpace, nonSpaceWeight) } return string(b) @@ -107,6 +149,18 @@ func addAuthLookup() { Description: "Unique identifier assigned to a user for accessing an account or system", Example: "Daniel1364", Output: "string", + Aliases: []string{ + "user name", + "login name", + "account username", + "account login", + "screen name", + "user handle", + }, + Keywords: []string{ + "login", "handle", "userid", "screenname", + "user", "account", "credential", "signin", "alias", "profile", "uid", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return username(f), nil }, @@ -118,6 +172,19 @@ func addAuthLookup() { Description: "Secret word or phrase used to authenticate access to a system or account", Example: "EEP+wwpk 4lU-eHNXlJZ4n K9%v&TZ9e", Output: "string", + Aliases: []string{ + "user password", + "account password", + "login password", + "secret phrase", + "auth secret", + }, + Keywords: []string{ + "passphrase", "pwd", "secret", + "credential", "authentication", "auth", + "security", "signin", "login", + "access", "key", "token", "hash", "encryption", + }, Params: []Param{ {Field: "lower", Display: "Lower", Type: "bool", Default: "true", Description: "Whether or not to add lower case characters"}, {Field: "upper", Display: "Upper", Type: "bool", Default: "true", Description: "Whether or not to add upper case characters"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/beer.go b/vendor/github.com/brianvoe/gofakeit/v7/beer.go 
index a6bc493e..9b407d70 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/beer.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/beer.go @@ -123,6 +123,8 @@ func addBeerLookup() { Description: "Specific brand or variety of beer", Example: "Duvel", Output: "string", + Aliases: []string{"brand", "brewery", "label", "craft", "microbrew"}, + Keywords: []string{"beer", "name", "variety", "specific", "alcoholic", "beverage", "lager", "ale", "stout", "pilsner", "ipa"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerName(f), nil }, @@ -134,6 +136,8 @@ func addBeerLookup() { Description: "Distinct characteristics and flavors of beer", Example: "European Amber Lager", Output: "string", + Aliases: []string{"style", "type", "category", "classification", "variety"}, + Keywords: []string{"beer", "characteristics", "flavors", "distinct", "lager", "ale", "stout", "pilsner", "porter", "wheat", "amber"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerStyle(f), nil }, @@ -145,6 +149,8 @@ func addBeerLookup() { Description: "The flower used in brewing to add flavor, aroma, and bitterness to beer", Example: "Glacier", Output: "string", + Aliases: []string{"hop", "flower", "plant", "cone", "vine"}, + Keywords: []string{"beer", "brewing", "flavor", "aroma", "bitterness", "ingredient", "humulus", "lupulus", "cascade", "citra", "mosaic"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerHop(f), nil }, @@ -156,6 +162,8 @@ func addBeerLookup() { Description: "Microorganism used in brewing to ferment sugars, producing alcohol and carbonation in beer", Example: "1388 - Belgian Strong Ale", Output: "string", + Aliases: []string{"yeast", "fungus", "microorganism", "culture", "strain"}, + Keywords: []string{"beer", "brewing", "ferment", "sugars", "alcohol", "carbonation", "ingredient", "saccharomyces", "cerevisiae", "belgian", "ale"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return 
beerYeast(f), nil }, @@ -167,6 +175,8 @@ func addBeerLookup() { Description: "Processed barley or other grains, provides sugars for fermentation and flavor to beer", Example: "Munich", Output: "string", + Aliases: []string{"malt", "barley", "grain", "cereal", "kernel"}, + Keywords: []string{"beer", "sugars", "fermentation", "flavor", "processed", "ingredient", "munich", "pale", "crystal", "roasted", "wheat", "rye"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerMalt(f), nil }, @@ -178,6 +188,8 @@ func addBeerLookup() { Description: "Measures the alcohol content in beer", Example: "2.7%", Output: "string", + Aliases: []string{"alcohol", "abv", "strength", "proof", "percentage"}, + Keywords: []string{"beer", "content", "measure", "volume", "concentration", "level", "degree", "potency"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerAlcohol(f), nil }, @@ -189,6 +201,8 @@ func addBeerLookup() { Description: "Scale measuring bitterness of beer from hops", Example: "29 IBU", Output: "string", + Aliases: []string{"ibu", "bitterness", "scale", "units", "measurement"}, + Keywords: []string{"beer", "hops", "measuring", "international", "bittering", "alpha", "acid", "level", "intensity"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerIbu(f), nil }, @@ -200,6 +214,8 @@ func addBeerLookup() { Description: "Scale indicating the concentration of extract in worts", Example: "6.4°Blg", Output: "string", + Aliases: []string{"blg", "density", "gravity", "extract", "concentration"}, + Keywords: []string{"beer", "worts", "scale", "indicating", "balling", "plato", "sugar", "soluble", "solids", "degree"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return beerBlg(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/book.go b/vendor/github.com/brianvoe/gofakeit/v7/book.go index fd8e0910..4125524a 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/book.go +++ 
b/vendor/github.com/brianvoe/gofakeit/v7/book.go @@ -48,6 +48,8 @@ func addBookLookup() { }`, Output: "map[string]string", ContentType: "application/json", + Aliases: []string{"printed", "pages", "bound", "subjects", "stories", "literature", "text"}, + Keywords: []string{"written", "work", "consisting", "anna", "karenina", "toni", "morrison", "thriller"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return book(f), nil }, @@ -59,6 +61,8 @@ func addBookLookup() { Description: "The specific name given to a book", Example: "Hamlet", Output: "string", + Aliases: []string{"title", "name", "specific", "given", "heading"}, + Keywords: []string{"book", "identification", "hamlet", "naming", "designation", "label", "caption"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bookTitle(f), nil }, @@ -70,6 +74,8 @@ func addBookLookup() { Description: "The individual who wrote or created the content of a book", Example: "Mark Twain", Output: "string", + Aliases: []string{"author", "writer", "creator", "individual", "content", "literary"}, + Keywords: []string{"book", "wrote", "created", "mark", "twain", "composer", "originator", "penned"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bookAuthor(f), nil }, @@ -81,6 +87,8 @@ func addBookLookup() { Description: "Category or type of book defined by its content, style, or form", Example: "Adventure", Output: "string", + Aliases: []string{"type", "content", "style", "form", "literature", "classification"}, + Keywords: []string{"book", "category", "defined", "adventure", "fiction", "non-fiction", "mystery", "romance", "sci-fi"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bookGenre(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/car.go b/vendor/github.com/brianvoe/gofakeit/v7/car.go index def82dc4..6f8760db 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/car.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/car.go @@ 
-84,6 +84,8 @@ func addCarLookup() { }`, Output: "map[string]any", ContentType: "application/json", + Aliases: []string{"vehicle", "automobile", "transportation", "motor", "wheeled"}, + Keywords: []string{"used", "passenger", "mini", "gasoline", "automatic", "fiat", "freestyle", "fwd"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return car(f), nil }, @@ -95,6 +97,8 @@ func addCarLookup() { Description: "Classification of cars based on size, use, or body style", Example: "Passenger car mini", Output: "string", + Aliases: []string{"classification", "size", "body", "style", "vehicle", "category"}, + Keywords: []string{"car", "based", "passenger", "mini", "suv", "sedan", "hatchback", "convertible", "coupe"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return carType(f), nil }, @@ -106,6 +110,8 @@ func addCarLookup() { Description: "Type of energy source a car uses", Example: "CNG", Output: "string", + Aliases: []string{"energy", "source", "power", "vehicle"}, + Keywords: []string{"car", "fuel", "uses", "cng", "gasoline", "diesel", "electric", "hybrid", "hydrogen", "ethanol"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return carFuelType(f), nil }, @@ -117,6 +123,8 @@ func addCarLookup() { Description: "Mechanism a car uses to transmit power from the engine to the wheels", Example: "Manual", Output: "string", + Aliases: []string{"mechanism", "power", "engine", "wheels", "vehicle"}, + Keywords: []string{"car", "transmission", "transmit", "manual", "automatic", "cvt", "semi-automatic", "gearbox", "clutch"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return carTransmissionType(f), nil }, @@ -128,6 +136,8 @@ func addCarLookup() { Description: "Company or brand that manufactures and designs cars", Example: "Nissan", Output: "string", + Aliases: []string{"company", "brand", "manufacturer", "designer", "vehicle", "producer"}, + Keywords: []string{"car", "maker", "manufactures", "nissan", 
"toyota", "honda", "ford", "bmw", "mercedes", "audi"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return carMaker(f), nil }, @@ -139,6 +149,19 @@ func addCarLookup() { Description: "Specific design or version of a car produced by a manufacturer", Example: "Aveo", Output: "string", + Aliases: []string{ + "vehicle model", + "auto model", + "car type", + "car version", + "automobile model", + }, + Keywords: []string{ + "car", "model", "vehicle", "auto", "automobile", + "type", "edition", "variant", "series", + "sedan", "suv", "hatchback", "coupe", "convertible", + "civic", "camry", "accord", "corolla", "mustang", "prius", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return carModel(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/celebrity.go b/vendor/github.com/brianvoe/gofakeit/v7/celebrity.go index b00036d8..74b525ab 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/celebrity.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/celebrity.go @@ -33,6 +33,8 @@ func addCelebrityLookup() { Description: "Famous person known for acting in films, television, or theater", Example: "Brad Pitt", Output: "string", + Aliases: []string{"actor", "famous", "films", "television", "theater", "entertainment"}, + Keywords: []string{"celebrity", "known", "brad", "pitt", "hollywood", "movie", "star", "performer", "artist"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return celebrityActor(f), nil }, @@ -44,6 +46,8 @@ func addCelebrityLookup() { Description: "High-profile individual known for significant achievements in business or entrepreneurship", Example: "Elon Musk", Output: "string", + Aliases: []string{"business", "entrepreneur", "high-profile", "achievements", "executive"}, + Keywords: []string{"celebrity", "significant", "elon", "musk", "ceo", "founder", "investor", "tycoon", "magnate"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return celebrityBusiness(f), nil }, @@ -55,6 
+59,8 @@ func addCelebrityLookup() { Description: "Famous athlete known for achievements in a particular sport", Example: "Michael Phelps", Output: "string", + Aliases: []string{"athlete", "famous", "achievements", "competition", "athletic", "player"}, + Keywords: []string{"celebrity", "particular", "michael", "phelps", "olympics", "champion", "medalist", "record", "holder"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return celebritySport(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/color.go b/vendor/github.com/brianvoe/gofakeit/v7/color.go index fd8d2568..ca817806 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/color.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/color.go @@ -63,6 +63,8 @@ func addColorLookup() { Description: "Hue seen by the eye, returns the name of the color like red or blue", Example: "MediumOrchid", Output: "string", + Aliases: []string{"color name", "hue name", "visual color", "shade name", "color label"}, + Keywords: []string{"hue", "chroma", "shade", "tone", "css", "name", "visual", "appearance", "pigment", "spectrum", "palette", "tint", "saturation", "brightness", "rgb", "hex"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return color(f), nil }, @@ -71,10 +73,12 @@ func addColorLookup() { AddFuncLookup("nicecolors", Info{ Display: "Nice Colors", Category: "color", - Description: "Attractive and appealing combinations of colors, returns an list of color hex codes", + Description: "Attractive and appealing combinations of colors, returns a list of color hex codes", Example: `["#cfffdd","#b4dec1","#5c5863","#a85163","#ff1f4c"]`, Output: "[]string", ContentType: "application/json", + Aliases: []string{"color palette", "nice palette", "harmonious colors", "aesthetic palette", "design colors"}, + Keywords: []string{"nice", "colors", "palette", "array", "hex", "design", "aesthetic", "beautiful", "harmonious", "scheme", "ui", "ux"}, Generate: func(f *Faker, m *MapParams, info 
*Info) (any, error) { return niceColors(f), nil }, @@ -86,6 +90,8 @@ func addColorLookup() { Description: "Colors displayed consistently on different web browsers and devices", Example: "black", Output: "string", + Aliases: []string{"web safe color", "browser safe", "cross platform color", "universal color", "standard color"}, + Keywords: []string{"safe", "color", "cross-platform", "css", "html", "compatible", "browser", "device", "universal", "stable", "standard"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return safeColor(f), nil }, @@ -94,9 +100,11 @@ func addColorLookup() { AddFuncLookup("hexcolor", Info{ Display: "Hex Color", Category: "color", - Description: "Six-digit code representing a color in the color model", + Description: "Six-digit hexadecimal code representing a color in the RGB color model", Example: "#a99fb4", Output: "string", + Aliases: []string{"hex color code", "css hex", "html hex", "web hex", "hexadecimal color"}, + Keywords: []string{"hex", "hexcolor", "color", "rgb", "six-digit", "web", "css", "html", "design", "hexadecimal", "hash", "code"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hexColor(f), nil }, @@ -109,8 +117,11 @@ func addColorLookup() { Example: "[85, 224, 195]", Output: "[]int", ContentType: "application/json", + Aliases: []string{"rgb triplet", "rgb array", "rgb value", "red green blue", "rgb color code"}, + Keywords: []string{"rgb", "color", "red", "green", "blue", "triplet", "digital", "screen", "display", "primary", "additive", "value", "css"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return rgbColor(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/company.go b/vendor/github.com/brianvoe/gofakeit/v7/company.go index 64167295..fc628f63 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/company.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/company.go @@ -117,6 +117,18 @@ func addCompanyLookup() { Description: "Designated 
official name of a business or organization", Example: "Moen, Pagac and Wuckert", Output: "string", + Aliases: []string{ + "business name", + "company name", + "organization name", + "corporate name", + "legal entity", + }, + Keywords: []string{ + "business", "organization", "corporation", + "enterprise", "firm", "entity", "brand", + "employer", "vendor", "partner", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return company(f), nil }, @@ -128,6 +140,19 @@ func addCompanyLookup() { Description: "Suffix at the end of a company name, indicating business structure, like 'Inc.' or 'LLC'", Example: "Inc", Output: "string", + Aliases: []string{ + "business suffix", + "legal suffix", + "company ending", + "corporate suffix", + "entity suffix", + }, + Keywords: []string{ + "suffix", "ending", "company", "business", "entity", + "inc", "incorporated", "llc", "ltd", "limited", + "corp", "corporation", "plc", "gmbh", "sarl", + "legal", "structure", "designation", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return companySuffix(f), nil }, @@ -139,6 +164,18 @@ func addCompanyLookup() { Description: "Random bs company word", Example: "front-end", Output: "string", + Aliases: []string{ + "business jargon", + "corporate jargon", + "marketing buzzword", + "tech buzzword", + "consulting speak", + }, + Keywords: []string{ + "jargon", "buzzwords", "synergy", "leverage", + "disrupt", "innovate", "scalable", "agile", "optimize", + "pipeline", "roadmap", "vision", "strategy", "corporate", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bs(f), nil }, @@ -150,6 +187,19 @@ func addCompanyLookup() { Description: "Brief description or summary of a company's purpose, products, or services", Example: "word", Output: "string", + Aliases: []string{ + "company blurb", + "company summary", + "company description", + "short overview", + "about text", + }, + Keywords: []string{ + "summary", "overview", "description", + 
"company", "profile", "about", "intro", + "purpose", "mission", "vision", "statement", + "services", "products", "offerings", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return blurb(f), nil }, @@ -161,6 +211,19 @@ func addCompanyLookup() { Description: "Trendy or overused term often used in business to sound impressive", Example: "disintermediate", Output: "string", + Aliases: []string{ + "business buzzword", + "corporate buzzword", + "trendy term", + "catchphrase", + "marketing phrase", + }, + Keywords: []string{ + "jargon", "hype", "trend", "phrase", + "term", "corporate", "management", "marketing", + "innovation", "paradigm", "disruptive", "visionary", + "fashionable", "impressive", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return buzzWord(f), nil }, @@ -169,7 +232,7 @@ func addCompanyLookup() { AddFuncLookup("job", Info{ Display: "Job", Category: "company", - Description: "Position or role in employment, involving specific tasks and responsibilities", + Description: "Occupation or role in employment, involving specific tasks and responsibilities", Example: `{ "company": "ClearHealthCosts", "title": "Agent", @@ -178,6 +241,22 @@ func addCompanyLookup() { }`, Output: "map[string]string", ContentType: "application/json", + Aliases: []string{ + "job role", + "job position", + "employment role", + "work role", + "career role", + "occupation", + "occupation role", + "occupation job", + }, + Keywords: []string{ + "role", "position", "employment", "work", + "career", "profession", "title", + "responsibilities", "tasks", "duties", + "staff", "employee", "hiring", "positioning", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return job(f), nil }, @@ -189,6 +268,18 @@ func addCompanyLookup() { Description: "Specific title for a position or role within a company or organization", Example: "Director", Output: "string", + Aliases: []string{ + "job designation", + "position title", + "role 
title", + "employment title", + "official title", + }, + Keywords: []string{ + "job", "title", "designation", "position", "role", + "occupation", "profession", "career", + "company", "organization", "staff", "employee", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return jobTitle(f), nil }, @@ -200,6 +291,18 @@ func addCompanyLookup() { Description: "Word used to describe the duties, requirements, and nature of a job", Example: "Central", Output: "string", + Aliases: []string{ + "job modifier", + "job adjective", + "role descriptor", + "title descriptor", + "position descriptor", + }, + Keywords: []string{ + "descriptor", "modifier", "adjective", "qualifier", + "job", "role", "title", "position", + "label", "term", "descriptive", "characterization", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return jobDescriptor(f), nil }, @@ -211,6 +314,20 @@ func addCompanyLookup() { Description: "Random job level", Example: "Assurance", Output: "string", + Aliases: []string{ + "seniority level", + "career level", + "position level", + "role level", + "job grade", + "job band", + }, + Keywords: []string{ + "level", "seniority", "rank", "tier", "grade", "band", + "entry", "junior", "associate", "mid", "senior", + "lead", "staff", "principal", "manager", "director", + "executive", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return jobLevel(f), nil }, @@ -222,6 +339,19 @@ func addCompanyLookup() { Description: "Catchphrase or motto used by a company to represent its brand or values", Example: "Universal seamless Focus, interactive.", Output: "string", + Aliases: []string{ + "company slogan", + "brand slogan", + "brand tagline", + "company motto", + "advertising slogan", + }, + Keywords: []string{ + "tagline", "motto", "catchphrase", + "brand", "company", "marketing", "advertising", + "identity", "values", "mission", "vision", + "strapline", "promo", "campaign", + }, Generate: func(f *Faker, m 
*MapParams, info *Info) (any, error) { return slogan(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/csv.go b/vendor/github.com/brianvoe/gofakeit/v7/csv.go index 7f31ec2f..dff0df90 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/csv.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/csv.go @@ -46,7 +46,7 @@ func csvFunc(f *Faker, co *CSVOptions) ([]byte, error) { } // Check fields - if co.Fields == nil || len(co.Fields) <= 0 { + if len(co.Fields) <= 0 { return nil, errors.New("must pass fields in order to build json object(s)") } @@ -134,6 +134,12 @@ func addFileCSVLookup() { 2,Osborne,Hilll,XPJ9OVNbs5lm`, Output: "[]byte", ContentType: "text/csv", + Aliases: []string{ + "comma separated", "csv file", "data table", "flat file", "spreadsheet format", "tabular data", + }, + Keywords: []string{ + "comma", "separated", "values", "format", "data", "spreadsheet", "entries", "rows", "columns", "dataset", "records", + }, Params: []Param{ {Field: "delimiter", Display: "Delimiter", Type: "string", Default: ",", Description: "Separator in between row values"}, {Field: "rowcount", Display: "Row Count", Type: "int", Default: "100", Description: "Number of rows"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/address.go b/vendor/github.com/brianvoe/gofakeit/v7/data/address.go index 98d88e82..b8861109 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/address.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/address.go @@ -6,6 +6,7 @@ var Address = map[string][]string{ "street_prefix": {"North", "East", "West", "South", "New", "Lake", "Port"}, "street_name": {"Alley", "Avenue", "Branch", "Bridge", "Brook", "Brooks", "Burg", "Burgs", "Bypass", "Camp", "Canyon", "Cape", "Causeway", "Center", "Centers", "Circle", "Circles", "Cliff", "Cliffs", "Club", "Common", "Corner", "Corners", "Course", "Court", "Courts", "Cove", "Coves", "Creek", "Crescent", "Crest", "Crossing", "Crossroad", "Curve", "Dale", "Dam", "Divide", "Drive", "Drive", "Drives", 
"Estate", "Estates", "Expressway", "Extension", "Extensions", "Fall", "Falls", "Ferry", "Field", "Fields", "Flat", "Flats", "Ford", "Fords", "Forest", "Forge", "Forges", "Fork", "Forks", "Fort", "Freeway", "Garden", "Gardens", "Gateway", "Glen", "Glens", "Green", "Greens", "Grove", "Groves", "Harbor", "Harbors", "Haven", "Heights", "Highway", "Hill", "Hills", "Hollow", "Inlet", "Inlet", "Island", "Island", "Islands", "Islands", "Isle", "Isle", "Junction", "Junctions", "Key", "Keys", "Knoll", "Knolls", "Lake", "Lakes", "Land", "Landing", "Lane", "Light", "Lights", "Loaf", "Lock", "Locks", "Locks", "Lodge", "Lodge", "Loop", "Mall", "Manor", "Manors", "Meadow", "Meadows", "Mews", "Mill", "Mills", "Mission", "Mission", "Motorway", "Mount", "Mountain", "Mountain", "Mountains", "Mountains", "Neck", "Orchard", "Oval", "Overpass", "Park", "Parks", "Parkway", "Parkways", "Pass", "Passage", "Path", "Pike", "Pine", "Pines", "Place", "Plain", "Plains", "Plains", "Plaza", "Plaza", "Point", "Points", "Port", "Port", "Ports", "Ports", "Prairie", "Prairie", "Radial", "Ramp", "Ranch", "Rapid", "Rapids", "Rest", "Ridge", "Ridges", "River", "Road", "Road", "Roads", "Roads", "Route", "Row", "Rue", "Run", "Shoal", "Shoals", "Shore", "Shores", "Skyway", "Spring", "Springs", "Springs", "Spur", "Spurs", "Square", "Square", "Squares", "Squares", "Station", "Station", "Stravenue", "Stravenue", "Stream", "Stream", "Street", "Street", "Streets", "Summit", "Summit", "Terrace", "Throughway", "Trace", "Track", "Trafficway", "Trail", "Trail", "Tunnel", "Tunnel", "Turnpike", "Turnpike", "Underpass", "Union", "Unions", "Valley", "Valleys", "Via", "Viaduct", "View", "Views", "Village", "Village", "Villages", "Ville", "Vista", "Vista", "Walk", "Walks", "Wall", "Way", "Ways", "Well", "Wells"}, "street_suffix": {"town", "ton", "land", "ville", "berg", "burgh", "borough", "bury", "view", "port", "mouth", "stad", "furt", "chester", "mouth", "fort", "haven", "side", "shire"}, + "unit": {"Apt", 
"Apartment", "Suite", "Ste", "Unit", "Floor", "Fl", "Room", "Rm", "Office", "Ofc", "Studio", "Loft", "Penthouse", "Ph"}, "city": {"New York City", "Los Angeles", "Chicago", "Houston", "Philadelphia", "Phoenix", "San Antonio", "San Diego", "Dallas", "San Jose", "Austin", "Jacksonville", "Indianapolis", "San Francisco", "Columbus", "Fort Worth", "Charlotte", "Detroit", "El Paso", "Memphis", "Boston", "Seattle", "Denver", "Washington", "Nashville-Davidson", "Baltimore", "Louisville/Jefferson", "Portland", "Oklahoma", "Milwaukee", "Las Vegas", "Albuquerque", "Tucson", "Fresno", "Sacramento", "Long Beach", "Kansas", "Mesa", "Virginia Beach", "Atlanta", "Colorado Springs", "Raleigh", "Omaha", "Miami", "Oakland", "Tulsa", "Minneapolis", "Cleveland", "Wichita", "Arlington", "New Orleans", "Bakersfield", "Tampa", "Honolulu", "Anaheim", "Aurora", "Santa Ana", "St. Louis", "Riverside", "Corpus Christi", "Pittsburgh", "Lexington-Fayette", "Stockton", "Cincinnati", "St. Paul", "Toledo", "Newark", "Greensboro", "Plano", "Henderson", "Lincoln", "Buffalo", "Fort Wayne", "Jersey", "Chula Vista", "Orlando", "St. 
Petersburg", "Norfolk", "Chandler", "Laredo", "Madison", "Durham", "Lubbock", "Winston-Salem", "Garland", "Glendale", "Hialeah", "Reno", "Baton Rouge", "Irvine", "Chesapeake", "Irving", "Scottsdale", "North Las Vegas", "Fremont", "San Bernardino", "Boise", "Birmingham"}, "state": {"Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado", "Connecticut", "Delaware", "Florida", "Georgia", "Hawaii", "Idaho", "Illinois", "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland", "Massachusetts", "Michigan", "Minnesota", "Mississippi", "Missouri", "Montana", "Nebraska", "Nevada", "New Hampshire", "New Jersey", "New Mexico", "New York", "North Carolina", "North Dakota", "Ohio", "Oklahoma", "Oregon", "Pennsylvania", "Rhode Island", "South Carolina", "South Dakota", "Tennessee", "Texas", "Utah", "Vermont", "Virginia", "Washington", "West Virginia", "Wisconsin", "Wyoming"}, "state_abr": {"AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY", "AE", "AA", "AP"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/airline.go b/vendor/github.com/brianvoe/gofakeit/v7/data/airline.go new file mode 100644 index 00000000..1b21ee4d --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/airline.go @@ -0,0 +1,120 @@ +package data + +// Airline consists of various airline information +var Airline = map[string][]string{ + "aircraft_type": { + "narrowbody", + "widebody", + "regional", + }, + "airplane": { + "Airbus A220", "Airbus A319", "Airbus A320", "Airbus A321", "Airbus A330", + "Airbus A340", "Airbus A350", "Airbus A380", + "Boeing 717", "Boeing 737", "Boeing 747", "Boeing 757", "Boeing 767", + "Boeing 777", "Boeing 787", + "Embraer E170", "Embraer E175", "Embraer E190", "Embraer E195", + "Bombardier 
CRJ200", "Bombardier CRJ700", "Bombardier CRJ900", "Bombardier CRJ1000", + "ATR 42", "ATR 72", + "De Havilland Dash 8", + "McDonnell Douglas MD-80", "McDonnell Douglas MD-90", + }, + "airport": { + "Hartsfield-Jackson Atlanta International Airport", + "Los Angeles International Airport", + "O'Hare International Airport", + "Dallas/Fort Worth International Airport", + "Denver International Airport", + "John F. Kennedy International Airport", + "San Francisco International Airport", + "Seattle-Tacoma International Airport", + "McCarran International Airport", + "Orlando International Airport", + "Miami International Airport", + "Charlotte Douglas International Airport", + "Newark Liberty International Airport", + "Phoenix Sky Harbor International Airport", + "George Bush Intercontinental Airport", + "Minneapolis-St Paul International Airport", + "Detroit Metropolitan Airport", + "Boston Logan International Airport", + "Fort Lauderdale-Hollywood International Airport", + "Philadelphia International Airport", + "LaGuardia Airport", + "Baltimore/Washington International Airport", + "Salt Lake City International Airport", + "Ronald Reagan Washington National Airport", + "Midway International Airport", + "San Diego International Airport", + "Tampa International Airport", + "Portland International Airport", + "Honolulu International Airport", + "Austin-Bergstrom International Airport", + "Nashville International Airport", + "Raleigh-Durham International Airport", + "Sacramento International Airport", + "Kansas City International Airport", + "Cleveland Hopkins International Airport", + "Indianapolis International Airport", + "San Jose International Airport", + "Cincinnati/Northern Kentucky International Airport", + "Pittsburgh International Airport", + "St. 
Louis Lambert International Airport", + "Heathrow Airport", + "Charles de Gaulle Airport", + "Dubai International Airport", + "Tokyo Haneda Airport", + "Los Angeles International Airport", + "Frankfurt Airport", + "Istanbul Airport", + "Guangzhou Baiyun International Airport", + "Amsterdam Airport Schiphol", + "Hong Kong International Airport", + "Singapore Changi Airport", + "Incheon International Airport", + "Shanghai Pudong International Airport", + "Munich Airport", + "Barcelona-El Prat Airport", + "Sydney Kingsford Smith Airport", + "Toronto Pearson International Airport", + "Vancouver International Airport", + "Zurich Airport", + "Vienna International Airport", + "Brussels Airport", + "Copenhagen Airport", + "Stockholm Arlanda Airport", + "Oslo Airport", + "Helsinki-Vantaa Airport", + "Athens International Airport", + "Rome Fiumicino Airport", + "Milan Malpensa Airport", + "Madrid-Barajas Airport", + "Lisbon Portela Airport", + "Dublin Airport", + "Manchester Airport", + "Edinburgh Airport", + "Melbourne Airport", + "Brisbane Airport", + "Auckland Airport", + "Johannesburg O.R. 
Tambo International Airport", + "Cairo International Airport", + "Nairobi Jomo Kenyatta International Airport", + "São Paulo-Guarulhos International Airport", + "Mexico City International Airport", + "Buenos Aires Ezeiza International Airport", + "Santiago International Airport", + "Lima Jorge Chávez International Airport", + "Bogotá El Dorado International Airport", + "Panama City Tocumen International Airport", + }, + "iata": { + "ATL", "LAX", "ORD", "DFW", "DEN", "JFK", "SFO", "SEA", "LAS", "MCO", + "MIA", "CLT", "EWR", "PHX", "IAH", "MSP", "DTW", "BOS", "FLL", "PHL", + "LGA", "BWI", "SLC", "DCA", "MDW", "SAN", "TPA", "PDX", "HNL", "AUS", + "BNA", "RDU", "SMF", "MCI", "CLE", "IND", "SJC", "CVG", "PIT", "STL", + "LHR", "CDG", "DXB", "HND", "FRA", "IST", "CAN", "AMS", "HKG", "SIN", + "ICN", "PVG", "MUC", "BCN", "SYD", "YYZ", "YVR", "ZRH", "VIE", "BRU", + "CPH", "ARN", "OSL", "HEL", "ATH", "FCO", "MXP", "MAD", "LIS", "DUB", + "MAN", "EDI", "MEL", "BNE", "AKL", "JNB", "CAI", "NBO", "GRU", "MEX", + "EZE", "SCL", "LIM", "BOG", "PTY", + }, +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/auth.go b/vendor/github.com/brianvoe/gofakeit/v7/data/auth.go new file mode 100644 index 00000000..2a3bcdde --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/auth.go @@ -0,0 +1,35 @@ +package data + +// Aut +var Auth = map[string][]string{ + "username": { + "{firstname}{number:0,9999}", + "{lastname}{number:0,9999}", + "{firstname}.{lastname}", + "{firstname}_{lastname}", + "{adjective}{animal}", + "{color}{animal}", + "{firstname}{lastname}", + "the{lastname}", + "mr{lastname}", + "ms{lastname}", + "dr{lastname}", + "{petname}{number:0,999}", + "{noun}{number:0,999}", + "{adjective}_{noun}", + "{color}_{noun}", + "{animal}_{number:10,99}", + "{animal}.{number:10,99}", + "{gamertag}", + "{gamertag}{number:0,99}", + "{firstname}{adjective}", + "{adjective}{firstname}", + "{firstname}{color}", + "{city}{number:0,99}", + "{programminglanguage}{number:0,999}", + 
"{jobtitle}{number:0,999}", + "{firstname}###", + "{lastname}_???", + "{firstname}.{number:10,99}", + }, +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/bank.go b/vendor/github.com/brianvoe/gofakeit/v7/data/bank.go new file mode 100644 index 00000000..47e81b26 --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/bank.go @@ -0,0 +1,67 @@ +package data + +var Bank = map[string][]string{ + "name": { + "Agricultural Bank of China", + "BNP Paribas", + "Banco Bilbao Vizcaya Argentaria", + "Banco Santander", + "Bank of America", + "Bank of China", + "Bank of Communications", + "Barclays", + "Capital One Financial Corporation", + "China Citic Bank", + "China Construction Bank Corporation", + "China Everbright Bank", + "China Merchants Bank", + "China Minsheng Bank", + "Citigroup", + "Commonwealth Bank Group", + "Credit Agricole Group", + "Credit Mutuel", + "Deutsche Bank", + "Goldman Sachs", + "Groupe BPCE", + "HDFC Bank", + "HSBC Holdings", + "Hua Xia Bank", + "ING Group", + "Industrial Bank", + "Industrial and Commercial Bank of China", + "Intesa Sanpaolo", + "JP Morgan Chase & Co", + "Lloyds Banking Group", + "Mitsubishi UFJ Financial Group", + "Mizuho Financial Group", + "Morgan Stanley", + "PNC Financial Services Group", + "Ping An Bank", + "Postal Savings Bank of China", + "Rabobank Group", + "Royal Bank of Canada", + "Sberbank", + "Scotiabank", + "Shanghai Pudong Development Bank", + "Societe Generale", + "State Bank of India", + "Sumitomo Mitsui Financial Group", + "Toronto Dominion Bank", + "Truist Bank", + "UBS", + "US Bancorp", + "UniCredit", + "Wells Fargo & Co", + }, + "type": { + "Central Bank", + "Commercial Bank", + "Cooperative Bank", + "Investment Bank", + "Online Bank", + "Policy Bank", + "Private Bank", + "Retail Bank", + "Savings Bank", + }, +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/currency.go b/vendor/github.com/brianvoe/gofakeit/v7/data/currency.go index 13b80199..792fc710 100644 --- 
a/vendor/github.com/brianvoe/gofakeit/v7/data/currency.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/currency.go @@ -2,6 +2,6 @@ package data // Currency consists of currency information var Currency = map[string][]string{ - "short": {"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BRL", "BSD", "BTN", "BWP", "BYR", "BZD", "CAD", "CDF", "CHF", "CLP", "CNY", "COP", "CRC", "CUC", "CUP", "CVE", "CZK", "DJF", "DKK", "DOP", "DZD", "EGP", "ERN", "ETB", "EUR", "FJD", "FKP", "GBP", "GEL", "GGP", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "IMP", "INR", "IQD", "IRR", "ISK", "JEP", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MYR", "MZN", "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON", "RSD", "RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SPL", "SRD", "STD", "SVC", "SYP", "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TVD", "TWD", "TZS", "UAH", "UGX", "USD", "UYU", "UZS", "VEF", "VND", "VUV", "WST", "XAF", "XCD", "XDR", "XOF", "XPF", "YER", "ZAR", "ZMW", "ZWD"}, - "long": {"United Arab Emirates Dirham", "Afghanistan Afghani", "Albania Lek", "Armenia Dram", "Netherlands Antilles Guilder", "Angola Kwanza", "Argentina Peso", "Australia Dollar", "Aruba Guilder", "Azerbaijan New Manat", "Bosnia and Herzegovina Convertible Marka", "Barbados Dollar", "Bangladesh Taka", "Bulgaria Lev", "Bahrain Dinar", "Burundi Franc", "Bermuda Dollar", "Brunei Darussalam Dollar", "Bolivia Boliviano", "Brazil Real", "Bahamas Dollar", "Bhutan Ngultrum", "Botswana Pula", "Belarus Ruble", "Belize Dollar", "Canada Dollar", "Congo/Kinshasa Franc", "Switzerland Franc", "Chile Peso", "China Yuan 
Renminbi", "Colombia Peso", "Costa Rica Colon", "Cuba Convertible Peso", "Cuba Peso", "Cape Verde Escudo", "Czech Republic Koruna", "Djibouti Franc", "Denmark Krone", "Dominican Republic Peso", "Algeria Dinar", "Egypt Pound", "Eritrea Nakfa", "Ethiopia Birr", "Euro Member Countries", "Fiji Dollar", "Falkland Islands (Malvinas) Pound", "United Kingdom Pound", "Georgia Lari", "Guernsey Pound", "Ghana Cedi", "Gibraltar Pound", "Gambia Dalasi", "Guinea Franc", "Guatemala Quetzal", "Guyana Dollar", "Hong Kong Dollar", "Honduras Lempira", "Croatia Kuna", "Haiti Gourde", "Hungary Forint", "Indonesia Rupiah", "Israel Shekel", "Isle of Man Pound", "India Rupee", "Iraq Dinar", "Iran Rial", "Iceland Krona", "Jersey Pound", "Jamaica Dollar", "Jordan Dinar", "Japan Yen", "Kenya Shilling", "Kyrgyzstan Som", "Cambodia Riel", "Comoros Franc", "Korea (North) Won", "Korea (South) Won", "Kuwait Dinar", "Cayman Islands Dollar", "Kazakhstan Tenge", "Laos Kip", "Lebanon Pound", "Sri Lanka Rupee", "Liberia Dollar", "Lesotho Loti", "Lithuania Litas", "Libya Dinar", "Morocco Dirham", "Moldova Leu", "Madagascar Ariary", "Macedonia Denar", "Myanmar (Burma) Kyat", "Mongolia Tughrik", "Macau Pataca", "Mauritania Ouguiya", "Mauritius Rupee", "Maldives (Maldive Islands) Rufiyaa", "Malawi Kwacha", "Mexico Peso", "Malaysia Ringgit", "Mozambique Metical", "Namibia Dollar", "Nigeria Naira", "Nicaragua Cordoba", "Norway Krone", "Nepal Rupee", "New Zealand Dollar", "Oman Rial", "Panama Balboa", "Peru Nuevo Sol", "Papua New Guinea Kina", "Philippines Peso", "Pakistan Rupee", "Poland Zloty", "Paraguay Guarani", "Qatar Riyal", "Romania New Leu", "Serbia Dinar", "Russia Ruble", "Rwanda Franc", "Saudi Arabia Riyal", "Solomon Islands Dollar", "Seychelles Rupee", "Sudan Pound", "Sweden Krona", "Singapore Dollar", "Saint Helena Pound", "Sierra Leone Leone", "Somalia Shilling", "Seborga Luigino", "Suriname Dollar", "São Tomé and Príncipe Dobra", "El Salvador Colon", "Syria Pound", "Swaziland Lilangeni", 
"Thailand Baht", "Tajikistan Somoni", "Turkmenistan Manat", "Tunisia Dinar", "Tonga Pa'anga", "Turkey Lira", "Trinidad and Tobago Dollar", "Tuvalu Dollar", "Taiwan New Dollar", "Tanzania Shilling", "Ukraine Hryvnia", "Uganda Shilling", "United States Dollar", "Uruguay Peso", "Uzbekistan Som", "Venezuela Bolivar", "Viet Nam Dong", "Vanuatu Vatu", "Samoa Tala", "Communauté Financière Africaine (BEAC) CFA Franc BEAC", "East Caribbean Dollar", "International Monetary Fund (IMF) Special Drawing Rights", "Communauté Financière Africaine (BCEAO) Franc", "Comptoirs Français du Pacifique (CFP) Franc", "Yemen Rial", "South Africa Rand", "Zambia Kwacha", "Zimbabwe Dollar"}, + "short": {"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", "CAD", "CDF", "CHF", "CLP", "CNY", "COP", "CRC", "CUC", "CUP", "CVE", "CZK", "DJF", "DKK", "DOP", "DZD", "EGP", "ERN", "ETB", "EUR", "FJD", "FKP", "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", "HKD", "HNL", "HTG", "HUF", "IDR", "ILS", "INR", "IQD", "IRR", "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRU", "MUR", "MVR", "MWK", "MXN", "MYR", "MZN", "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON", "RSD", "RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "STN", "SVC", "SYP", "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH", "UGX", "USD", "UYU", "UZS", "VES", "VND", "VUV", "WST", "XAF", "XCD", "XDR", "XOF", "XPF", "YER", "ZAR", "ZMW", "ZWL"}, + "long": {"United Arab Emirates Dirham", "Afghanistan Afghani", "Albania Lek", "Armenia Dram", "Netherlands Antilles Guilder", "Angola Kwanza", "Argentina Peso", "Australia Dollar", "Aruba Guilder", "Azerbaijan New 
Manat", "Bosnia and Herzegovina Convertible Marka", "Barbados Dollar", "Bangladesh Taka", "Bulgaria Lev", "Bahrain Dinar", "Burundi Franc", "Bermuda Dollar", "Brunei Darussalam Dollar", "Bolivia Boliviano", "Brazil Real", "Bahamas Dollar", "Bhutan Ngultrum", "Botswana Pula", "Belarus Ruble", "Belize Dollar", "Canada Dollar", "Congo/Kinshasa Franc", "Switzerland Franc", "Chile Peso", "China Yuan Renminbi", "Colombia Peso", "Costa Rica Colon", "Cuba Convertible Peso", "Cuba Peso", "Cape Verde Escudo", "Czech Republic Koruna", "Djibouti Franc", "Denmark Krone", "Dominican Republic Peso", "Algeria Dinar", "Egypt Pound", "Eritrea Nakfa", "Ethiopia Birr", "Euro Member Countries", "Fiji Dollar", "Falkland Islands (Malvinas) Pound", "United Kingdom Pound", "Georgia Lari", "Ghana Cedi", "Gibraltar Pound", "Gambia Dalasi", "Guinea Franc", "Guatemala Quetzal", "Guyana Dollar", "Hong Kong Dollar", "Honduras Lempira", "Haiti Gourde", "Hungary Forint", "Indonesia Rupiah", "Israel Shekel", "India Rupee", "Iraq Dinar", "Iran Rial", "Iceland Krona", "Jamaica Dollar", "Jordan Dinar", "Japan Yen", "Kenya Shilling", "Kyrgyzstan Som", "Cambodia Riel", "Comoros Franc", "Korea (North) Won", "Korea (South) Won", "Kuwait Dinar", "Cayman Islands Dollar", "Kazakhstan Tenge", "Laos Kip", "Lebanon Pound", "Sri Lanka Rupee", "Liberia Dollar", "Lesotho Loti", "Libya Dinar", "Morocco Dirham", "Moldova Leu", "Madagascar Ariary", "Macedonia Denar", "Myanmar (Burma) Kyat", "Mongolia Tughrik", "Macau Pataca", "Mauritania Ouguiya", "Mauritius Rupee", "Maldives (Maldive Islands) Rufiyaa", "Malawi Kwacha", "Mexico Peso", "Malaysia Ringgit", "Mozambique Metical", "Namibia Dollar", "Nigeria Naira", "Nicaragua Cordoba", "Norway Krone", "Nepal Rupee", "New Zealand Dollar", "Oman Rial", "Panama Balboa", "Peru Nuevo Sol", "Papua New Guinea Kina", "Philippines Peso", "Pakistan Rupee", "Poland Zloty", "Paraguay Guarani", "Qatar Riyal", "Romania New Leu", "Serbia Dinar", "Russia Ruble", "Rwanda Franc", "Saudi 
Arabia Riyal", "Solomon Islands Dollar", "Seychelles Rupee", "Sudan Pound", "Sweden Krona", "Singapore Dollar", "Saint Helena Pound", "Sierra Leone Leone", "Somalia Shilling", "Suriname Dollar", "São Tomé and Príncipe Dobra", "El Salvador Colon", "Syria Pound", "Swaziland Lilangeni", "Thailand Baht", "Tajikistan Somoni", "Turkmenistan Manat", "Tunisia Dinar", "Tonga Pa'anga", "Turkey Lira", "Trinidad and Tobago Dollar", "Taiwan New Dollar", "Tanzania Shilling", "Ukraine Hryvnia", "Uganda Shilling", "United States Dollar", "Uruguay Peso", "Uzbekistan Som", "Venezuela Bolivar", "Viet Nam Dong", "Vanuatu Vatu", "Samoa Tala", "Communauté Financière Africaine (BEAC) CFA Franc BEAC", "East Caribbean Dollar", "International Monetary Fund (IMF) Special Drawing Rights", "Communauté Financière Africaine (BCEAO) Franc", "Comptoirs Français du Pacifique (CFP) Franc", "Yemen Rial", "South Africa Rand", "Zambia Kwacha", "Zimbabwe Dollar"}, } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/data.go b/vendor/github.com/brianvoe/gofakeit/v7/data/data.go index 51a4f496..9a85e1b6 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/data.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/data.go @@ -3,7 +3,9 @@ package data // Data consists of the main set of fake information var Data = map[string]map[string][]string{ "person": Person, + "auth": Auth, "address": Address, + "airline": Airline, "company": Company, "job": Job, "lorem": Lorem, @@ -22,7 +24,7 @@ var Data = map[string]map[string][]string{ "car": Car, "emoji": Emoji, "word": Word, - "sentence": Sentence, + "text": Text, "food": Food, "minecraft": Minecraft, "celebrity": Celebrity, @@ -31,7 +33,9 @@ var Data = map[string]map[string][]string{ "book": Books, "movie": Movies, "school": School, + "song": Songs, "product": Product, + "bank": Bank, } func List() map[string][]string { diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/emoji.go b/vendor/github.com/brianvoe/gofakeit/v7/data/emoji.go index 
8f8ce80f..74a13bba 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/emoji.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/emoji.go @@ -4,3537 +4,158 @@ package data // Emoji consists of emoji information var Emoji = map[string][]string{ - "emoji": { - "😀", - "😃", - "😄", - "😁", - "😆", - "😅", - "🤣", - "😂", - "🙂", - "🙃", - "😉", - "😊", - "😇", - "🥰", - "😍", - "🤩", - "😘", - "😗", - "☺️", - "😚", - "😙", - "😋", - "😛", - "😜", - "🤪", - "😝", - "🤑", - "🤗", - "🤭", - "🤫", - "🤔", - "🤐", - "🤨", - "😐", - "😑", - "😶", - "😏", - "😒", - "🙄", - "😬", - "🤥", - "😌", - "😔", - "😪", - "🤤", - "😴", - "😷", - "🤒", - "🤕", - "🤢", - "🤮", - "🤧", - "🥵", - "🥶", - "🥴", - "😵", - "🤯", - "🤠", - "🥳", - "😎", - "🤓", - "🧐", - "😕", - "😟", - "🙁", - "☹️", - "😮", - "😯", - "😲", - "😳", - "🥺", - "😦", - "😧", - "😨", - "😰", - "😥", - "😢", - "😭", - "😱", - "😖", - "😣", - "😞", - "😓", - "😩", - "😫", - "🥱", - "😤", - "😡", - "😠", - "🤬", - "😈", - "👿", - "💀", - "☠️", - "💩", - "🤡", - "👹", - "👺", - "👻", - "👽", - "👾", - "🤖", - "😺", - "😸", - "😹", - "😻", - "😼", - "😽", - "🙀", - "😿", - "😾", - "🙈", - "🙉", - "🙊", - "💋", - "💌", - "💘", - "💝", - "💖", - "💗", - "💓", - "💞", - "💕", - "💟", - "❣️", - "💔", - "❤️", - "🧡", - "💛", - "💚", - "💙", - "💜", - "🤎", - "🖤", - "🤍", - "💯", - "💢", - "💥", - "💫", - "💦", - "💨", - "🕳️", - "💣", - "💬", - "🗨️", - "🗯️", - "💭", - "💤", - "👋", - "🤚", - "🖐️", - "✋", - "🖖", - "👌", - "🤏", - "✌️", - "🤞", - "🤟", - "🤘", - "🤙", - "👈", - "👉", - "👆", - "🖕", - "👇", - "☝️", - "👍", - "👎", - "✊", - "👊", - "🤛", - "🤜", - "👏", - "🙌", - "👐", - "🤲", - "🤝", - "🙏", - "✍️", - "💅", - "🤳", - "💪", - "🦾", - "🦿", - "🦵", - "🦶", - "👂", - "🦻", - "👃", - "🧠", - "🦷", - "🦴", - "👀", - "👁️", - "👅", - "👄", - "👶", - "🧒", - "👦", - "👧", - "🧑", - "👱", - "👨", - "🧔", - "👨‍🦰", - "👨‍🦱", - "👨‍🦳", - "👨‍🦲", - "👩", - "👩‍🦰", - "🧑‍🦰", - "👩‍🦱", - "🧑‍🦱", - "👩‍🦳", - "🧑‍🦳", - "👩‍🦲", - "🧑‍🦲", - "👱‍♀️", - "👱‍♂️", - "🧓", - "👴", - "👵", - "🙍", - "🙍‍♂️", - "🙍‍♀️", - "🙎", - "🙎‍♂️", - "🙎‍♀️", - "🙅", - "🙅‍♂️", - "🙅‍♀️", - "🙆", - "🙆‍♂️", - "🙆‍♀️", - "💁", - "💁‍♂️", - "💁‍♀️", - 
"🙋", - "🙋‍♂️", - "🙋‍♀️", - "🧏", - "🧏‍♂️", - "🧏‍♀️", - "🙇", - "🙇‍♂️", - "🙇‍♀️", - "🤦", - "🤦‍♂️", - "🤦‍♀️", - "🤷", - "🤷‍♂️", - "🤷‍♀️", - "🧑‍⚕️", - "👨‍⚕️", - "👩‍⚕️", - "🧑‍🎓", - "👨‍🎓", - "👩‍🎓", - "🧑‍🏫", - "👨‍🏫", - "👩‍🏫", - "🧑‍⚖️", - "👨‍⚖️", - "👩‍⚖️", - "🧑‍🌾", - "👨‍🌾", - "👩‍🌾", - "🧑‍🍳", - "👨‍🍳", - "👩‍🍳", - "🧑‍🔧", - "👨‍🔧", - "👩‍🔧", - "🧑‍🏭", - "👨‍🏭", - "👩‍🏭", - "🧑‍💼", - "👨‍💼", - "👩‍💼", - "🧑‍🔬", - "👨‍🔬", - "👩‍🔬", - "🧑‍💻", - "👨‍💻", - "👩‍💻", - "🧑‍🎤", - "👨‍🎤", - "👩‍🎤", - "🧑‍🎨", - "👨‍🎨", - "👩‍🎨", - "🧑‍✈️", - "👨‍✈️", - "👩‍✈️", - "🧑‍🚀", - "👨‍🚀", - "👩‍🚀", - "🧑‍🚒", - "👨‍🚒", - "👩‍🚒", - "👮", - "👮‍♂️", - "👮‍♀️", - "🕵️", - "💂", - "💂‍♂️", - "💂‍♀️", - "👷", - "👷‍♂️", - "👷‍♀️", - "🤴", - "👸", - "👳", - "👳‍♂️", - "👳‍♀️", - "👲", - "🧕", - "🤵", - "🤵‍♂️", - "🤵‍♀️", - "👰", - "👰‍♂️", - "👰‍♀️", - "🤰", - "🤱", - "👩‍🍼", - "👨‍🍼", - "🧑‍🍼", - "👼", - "🎅", - "🤶", - "🧑‍🎄", - "🦸", - "🦸‍♂️", - "🦸‍♀️", - "🦹", - "🦹‍♂️", - "🦹‍♀️", - "🧙", - "🧙‍♂️", - "🧙‍♀️", - "🧚", - "🧚‍♂️", - "🧚‍♀️", - "🧛", - "🧛‍♂️", - "🧛‍♀️", - "🧜", - "🧜‍♂️", - "🧜‍♀️", - "🧝", - "🧝‍♂️", - "🧝‍♀️", - "🧞", - "🧞‍♂️", - "🧞‍♀️", - "🧟", - "🧟‍♂️", - "🧟‍♀️", - "💆", - "💆‍♂️", - "💆‍♀️", - "💇", - "💇‍♂️", - "💇‍♀️", - "🚶", - "🚶‍♂️", - "🚶‍♀️", - "🧍", - "🧍‍♂️", - "🧍‍♀️", - "🧎", - "🧎‍♂️", - "🧎‍♀️", - "🧑‍🦯", - "👨‍🦯", - "👩‍🦯", - "🧑‍🦼", - "👨‍🦼", - "👩‍🦼", - "🧑‍🦽", - "👨‍🦽", - "👩‍🦽", - "🏃", - "🏃‍♂️", - "🏃‍♀️", - "💃", - "🕺", - "🕴️", - "👯", - "👯‍♂️", - "👯‍♀️", - "🧖", - "🧖‍♂️", - "🧖‍♀️", - "🧗", - "🧗‍♂️", - "🧗‍♀️", - "🤺", - "🏇", - "⛷️", - "🏂", - "🏌️", - "🏄", - "🚣", - "🚣‍♂️", - "🚣‍♀️", - "🏊", - "⛹️", - "🏋️", - "🚴", - "🚴‍♂️", - "🚴‍♀️", - "🚵", - "🚵‍♂️", - "🚵‍♀️", - "🤸", - "🤸‍♂️", - "🤸‍♀️", - "🤼", - "🤼‍♂️", - "🤼‍♀️", - "🤽", - "🤽‍♂️", - "🤽‍♀️", - "🤾", - "🤾‍♂️", - "🤾‍♀️", - "🤹", - "🤹‍♂️", - "🤹‍♀️", - "🧘", - "🧘‍♂️", - "🧘‍♀️", - "🛀", - "🛌", - "🧑‍🤝‍🧑", - "👭", - "👫", - "👬", - "💏", - "👪", - "👨‍👩‍👦", - "👨‍👩‍👧", - "👨‍👩‍👧‍👦", - "👨‍👩‍👦‍👦", - "👨‍👩‍👧‍👧", - "👨‍👨‍👦", - "👨‍👨‍👧", - "👨‍👨‍👧‍👦", - "👨‍👨‍👦‍👦", - "👨‍👨‍👧‍👧", - "👩‍👩‍👦", - "👩‍👩‍👧", - "👩‍👩‍👧‍👦", - "👩‍👩‍👦‍👦", - "👩‍👩‍👧‍👧", - "👨‍👦", - 
"👨‍👦‍👦", - "👨‍👧", - "👨‍👧‍👦", - "👨‍👧‍👧", - "👩‍👦", - "👩‍👦‍👦", - "👩‍👧", - "👩‍👧‍👦", - "👩‍👧‍👧", - "🗣️", - "👤", - "👥", - "👣", - "🐵", - "🐒", - "🦍", - "🦧", - "🐶", - "🐕", - "🦮", - "🐩", - "🐺", - "🦊", - "🦝", - "🐱", - "🐈", - "🐈‍⬛", - "🦁", - "🐯", - "🐅", - "🐆", - "🐴", - "🐎", - "🦄", - "🦓", - "🦌", - "🐮", - "🐂", - "🐃", - "🐄", - "🐷", - "🐖", - "🐗", - "🐽", - "🐏", - "🐑", - "🐐", - "🐪", - "🐫", - "🦙", - "🦒", - "🐘", - "🦏", - "🦛", - "🐭", - "🐁", - "🐀", - "🐹", - "🐰", - "🐇", - "🐿️", - "🦔", - "🦇", - "🐻", - "🐻‍❄️", - "🐨", - "🐼", - "🦥", - "🦦", - "🦨", - "🦘", - "🦡", - "🐾", - "🦃", - "🐔", - "🐓", - "🐣", - "🐤", - "🐥", - "🐦", - "🐧", - "🕊️", - "🦅", - "🦆", - "🦢", - "🦉", - "🦩", - "🦚", - "🦜", - "🐸", - "🐊", - "🐢", - "🦎", - "🐍", - "🐲", - "🐉", - "🦕", - "🦖", - "🐳", - "🐋", - "🐬", - "🐟", - "🐠", - "🐡", - "🦈", - "🐙", - "🐚", - "🐌", - "🦋", - "🐛", - "🐜", - "🐝", - "🐞", - "🦗", - "🕷️", - "🕸️", - "🦂", - "🦟", - "🦠", - "💐", - "🌸", - "💮", - "🏵️", - "🌹", - "🥀", - "🌺", - "🌻", - "🌼", - "🌷", - "🌱", - "🌲", - "🌳", - "🌴", - "🌵", - "🌾", - "🌿", - "☘️", - "🍀", - "🍁", - "🍂", - "🍃", - "🍇", - "🍈", - "🍉", - "🍊", - "🍋", - "🍌", - "🍍", - "🥭", - "🍎", - "🍏", - "🍐", - "🍑", - "🍒", - "🍓", - "🥝", - "🍅", - "🥥", - "🥑", - "🍆", - "🥔", - "🥕", - "🌽", - "🌶️", - "🥒", - "🥬", - "🥦", - "🧄", - "🧅", - "🍄", - "🥜", - "🌰", - "🍞", - "🥐", - "🥖", - "🥨", - "🥯", - "🥞", - "🧇", - "🧀", - "🍖", - "🍗", - "🥩", - "🥓", - "🍔", - "🍟", - "🍕", - "🌭", - "🥪", - "🌮", - "🌯", - "🥙", - "🧆", - "🥚", - "🍳", - "🥘", - "🍲", - "🥣", - "🥗", - "🍿", - "🧈", - "🧂", - "🥫", - "🍱", - "🍘", - "🍙", - "🍚", - "🍛", - "🍜", - "🍝", - "🍠", - "🍢", - "🍣", - "🍤", - "🍥", - "🥮", - "🍡", - "🥟", - "🥠", - "🥡", - "🦀", - "🦞", - "🦐", - "🦑", - "🦪", - "🍦", - "🍧", - "🍨", - "🍩", - "🍪", - "🎂", - "🍰", - "🧁", - "🥧", - "🍫", - "🍬", - "🍭", - "🍮", - "🍯", - "🍼", - "🥛", - "🍵", - "🍶", - "🍾", - "🍷", - "🍸", - "🍹", - "🍺", - "🍻", - "🥂", - "🥃", - "🥤", - "🧃", - "🧉", - "🧊", - "🥢", - "🍽️", - "🍴", - "🥄", - "🔪", - "🏺", - "🌍", - "🌎", - "🌏", - "🌐", - "🗺️", - "🗾", - "🧭", - "🏔️", - "⛰️", - "🌋", - "🗻", - "🏕️", - "🏖️", - "🏜️", - "🏝️", - "🏞️", - "🏟️", - 
"🏛️", - "🏗️", - "🧱", - "🏘️", - "🏚️", - "🏠", - "🏡", - "🏢", - "🏣", - "🏤", - "🏥", - "🏦", - "🏨", - "🏩", - "🏪", - "🏫", - "🏬", - "🏭", - "🏯", - "🏰", - "💒", - "🗼", - "🗽", - "⛪", - "🕌", - "🛕", - "🕍", - "⛩️", - "🕋", - "⛲", - "⛺", - "🌁", - "🌃", - "🏙️", - "🌄", - "🌅", - "🌆", - "🌇", - "🌉", - "♨️", - "🎠", - "🎡", - "🎢", - "💈", - "🎪", - "🚂", - "🚃", - "🚄", - "🚅", - "🚆", - "🚇", - "🚈", - "🚉", - "🚊", - "🚝", - "🚞", - "🚋", - "🚌", - "🚍", - "🚎", - "🚐", - "🚑", - "🚒", - "🚓", - "🚔", - "🚕", - "🚖", - "🚗", - "🚘", - "🚙", - "🚚", - "🚛", - "🚜", - "🏎️", - "🏍️", - "🛵", - "🦽", - "🦼", - "🛺", - "🚲", - "🛴", - "🛹", - "🚏", - "🛣️", - "🛤️", - "🛢️", - "⛽", - "🚨", - "🚥", - "🚦", - "🛑", - "🚧", - "⚓", - "⛵", - "🛶", - "🚤", - "🛳️", - "⛴️", - "🛥️", - "🚢", - "✈️", - "🛩️", - "🛫", - "🛬", - "🪂", - "💺", - "🚁", - "🚟", - "🚠", - "🚡", - "🛰️", - "🚀", - "🛸", - "🛎️", - "🧳", - "⌛", - "⏳", - "⌚", - "⏰", - "⏱️", - "⏲️", - "🕰️", - "🕛", - "🕧", - "🕐", - "🕜", - "🕑", - "🕝", - "🕒", - "🕞", - "🕓", - "🕟", - "🕔", - "🕠", - "🕕", - "🕡", - "🕖", - "🕢", - "🕗", - "🕣", - "🕘", - "🕤", - "🕙", - "🕥", - "🕚", - "🕦", - "🌑", - "🌒", - "🌓", - "🌔", - "🌕", - "🌖", - "🌗", - "🌘", - "🌙", - "🌚", - "🌛", - "🌜", - "🌡️", - "☀️", - "🌝", - "🌞", - "🪐", - "⭐", - "🌟", - "🌠", - "🌌", - "☁️", - "⛅", - "⛈️", - "🌤️", - "🌥️", - "🌦️", - "🌧️", - "🌨️", - "🌩️", - "🌪️", - "🌫️", - "🌬️", - "🌀", - "🌈", - "🌂", - "☂️", - "☔", - "⛱️", - "⚡", - "❄️", - "☃️", - "⛄", - "☄️", - "🔥", - "💧", - "🌊", - "🎃", - "🎄", - "🎆", - "🎇", - "🧨", - "✨", - "🎈", - "🎉", - "🎊", - "🎋", - "🎍", - "🎎", - "🎏", - "🎐", - "🎑", - "🧧", - "🎀", - "🎁", - "🎗️", - "🎟️", - "🎫", - "🎖️", - "🏆", - "🏅", - "🥇", - "🥈", - "🥉", - "⚽", - "⚾", - "🥎", - "🏀", - "🏐", - "🏈", - "🏉", - "🎾", - "🥏", - "🎳", - "🏏", - "🏑", - "🏒", - "🥍", - "🏓", - "🏸", - "🥊", - "🥋", - "🥅", - "⛳", - "⛸️", - "🎣", - "🤿", - "🎽", - "🎿", - "🛷", - "🥌", - "🎯", - "🪀", - "🪁", - "🎱", - "🔮", - "🧿", - "🎮", - "🕹️", - "🎰", - "🎲", - "🧩", - "🧸", - "♠️", - "♥️", - "♦️", - "♣️", - "♟️", - "🃏", - "🀄", - "🎴", - "🎭", - "🖼️", - "🎨", - "🧵", - "🧶", - "👓", - "🕶️", - "🥽", - "🥼", - "🦺", - "👔", - 
"👕", - "👖", - "🧣", - "🧤", - "🧥", - "🧦", - "👗", - "👘", - "🥻", - "🩱", - "🩲", - "🩳", - "👙", - "👚", - "👛", - "👜", - "👝", - "🛍️", - "🎒", - "👞", - "👟", - "🥾", - "🥿", - "👠", - "👡", - "🩰", - "👢", - "👑", - "👒", - "🎩", - "🎓", - "🧢", - "⛑️", - "📿", - "💄", - "💍", - "💎", - "🔇", - "🔈", - "🔉", - "🔊", - "📢", - "📣", - "📯", - "🔔", - "🔕", - "🎼", - "🎵", - "🎶", - "🎙️", - "🎚️", - "🎛️", - "🎤", - "🎧", - "📻", - "🎷", - "🎸", - "🎹", - "🎺", - "🎻", - "🪕", - "🥁", - "📱", - "📲", - "☎️", - "📞", - "📟", - "📠", - "🔋", - "🔌", - "💻", - "🖥️", - "🖨️", - "⌨️", - "🖱️", - "🖲️", - "💽", - "💾", - "💿", - "📀", - "🧮", - "🎥", - "🎞️", - "📽️", - "🎬", - "📺", - "📷", - "📸", - "📹", - "📼", - "🔍", - "🔎", - "🕯️", - "💡", - "🔦", - "🏮", - "🪔", - "📔", - "📕", - "📖", - "📗", - "📘", - "📙", - "📚", - "📓", - "📒", - "📃", - "📜", - "📄", - "📰", - "🗞️", - "📑", - "🔖", - "🏷️", - "💰", - "💴", - "💵", - "💶", - "💷", - "💸", - "💳", - "🧾", - "💹", - "✉️", - "📧", - "📨", - "📩", - "📤", - "📥", - "📦", - "📫", - "📪", - "📬", - "📭", - "📮", - "🗳️", - "✏️", - "✒️", - "🖋️", - "🖊️", - "🖌️", - "🖍️", - "📝", - "💼", - "📁", - "📂", - "🗂️", - "📅", - "📆", - "🗒️", - "🗓️", - "📇", - "📈", - "📉", - "📊", - "📋", - "📌", - "📍", - "📎", - "🖇️", - "📏", - "📐", - "✂️", - "🗃️", - "🗄️", - "🗑️", - "🔒", - "🔓", - "🔏", - "🔐", - "🔑", - "🗝️", - "🔨", - "🪓", - "⛏️", - "⚒️", - "🛠️", - "🗡️", - "⚔️", - "🔫", - "🏹", - "🛡️", - "🔧", - "🔩", - "⚙️", - "🗜️", - "⚖️", - "🦯", - "🔗", - "⛓️", - "🧰", - "🧲", - "⚗️", - "🧪", - "🧫", - "🧬", - "🔬", - "🔭", - "📡", - "💉", - "🩸", - "💊", - "🩹", - "🩺", - "🚪", - "🛏️", - "🛋️", - "🪑", - "🚽", - "🚿", - "🛁", - "🪒", - "🧴", - "🧷", - "🧹", - "🧺", - "🧻", - "🧼", - "🧽", - "🧯", - "🛒", - "🚬", - "⚰️", - "⚱️", - "🗿", - "🏧", - "🚮", - "🚰", - "♿", - "🚹", - "🚺", - "🚻", - "🚼", - "🚾", - "🛂", - "🛃", - "🛄", - "🛅", - "⚠️", - "🚸", - "⛔", - "🚫", - "🚳", - "🚭", - "🚯", - "🚱", - "🚷", - "📵", - "🔞", - "☢️", - "☣️", - "⬆️", - "↗️", - "➡️", - "↘️", - "⬇️", - "↙️", - "⬅️", - "↖️", - "↕️", - "↔️", - "↩️", - "↪️", - "⤴️", - "⤵️", - "🔃", - "🔄", - "🔙", - "🔚", - "🔛", - "🔜", - "🔝", - "🛐", - "⚛️", - "🕉️", - "✡️", - 
"☸️", - "☯️", - "✝️", - "☦️", - "☪️", - "☮️", - "🕎", - "🔯", - "♈", - "♉", - "♊", - "♋", - "♌", - "♍", - "♎", - "♏", - "♐", - "♑", - "♒", - "♓", - "⛎", - "🔀", - "🔁", - "🔂", - "▶️", - "⏩", - "⏭️", - "⏯️", - "◀️", - "⏪", - "⏮️", - "🔼", - "⏫", - "🔽", - "⏬", - "⏸️", - "⏹️", - "⏺️", - "⏏️", - "🎦", - "🔅", - "🔆", - "📶", - "📳", - "📴", - "♀️", - "♂️", - "⚧️", - "✖️", - "➕", - "➖", - "➗", - "♾️", - "‼️", - "⁉️", - "❓", - "❔", - "❕", - "❗", - "〰️", - "💱", - "💲", - "⚕️", - "♻️", - "⚜️", - "🔱", - "📛", - "🔰", - "⭕", - "✅", - "☑️", - "✔️", - "❌", - "❎", - "➰", - "➿", - "〽️", - "✳️", - "✴️", - "❇️", - "©️", - "®️", - "™️", - "#️⃣", - "*️⃣", - "0️⃣", - "1️⃣", - "2️⃣", - "3️⃣", - "4️⃣", - "5️⃣", - "6️⃣", - "7️⃣", - "8️⃣", - "9️⃣", - "🔟", - "🔠", - "🔡", - "🔢", - "🔣", - "🔤", - "🅰️", - "🆎", - "🅱️", - "🆑", - "🆒", - "🆓", - "ℹ️", - "🆔", - "Ⓜ️", - "🆕", - "🆖", - "🅾️", - "🆗", - "🅿️", - "🆘", - "🆙", - "🆚", - "🈁", - "🈂️", - "🈷️", - "🈶", - "🈯", - "🉐", - "🈹", - "🈚", - "🈲", - "🉑", - "🈸", - "🈴", - "🈳", - "㊗️", - "㊙️", - "🈺", - "🈵", - "🔴", - "🟠", - "🟡", - "🟢", - "🔵", - "🟣", - "🟤", - "⚫", - "⚪", - "🟥", - "🟧", - "🟨", - "🟩", - "🟦", - "🟪", - "🟫", - "⬛", - "⬜", - "◼️", - "◻️", - "◾", - "◽", - "▪️", - "▫️", - "🔶", - "🔷", - "🔸", - "🔹", - "🔺", - "🔻", - "💠", - "🔘", - "🔳", - "🔲", - "🏁", - "🚩", - "🎌", - "🏴", - "🏳️", - "🏴‍☠️", - "🇦🇨", - "🇦🇩", - "🇦🇪", - "🇦🇫", - "🇦🇬", - "🇦🇮", - "🇦🇱", - "🇦🇲", - "🇦🇴", - "🇦🇶", - "🇦🇷", - "🇦🇸", - "🇦🇹", - "🇦🇺", - "🇦🇼", - "🇦🇽", - "🇦🇿", - "🇧🇦", - "🇧🇧", - "🇧🇩", - "🇧🇪", - "🇧🇫", - "🇧🇬", - "🇧🇭", - "🇧🇮", - "🇧🇯", - "🇧🇱", - "🇧🇲", - "🇧🇳", - "🇧🇴", - "🇧🇶", - "🇧🇷", - "🇧🇸", - "🇧🇹", - "🇧🇻", - "🇧🇼", - "🇧🇾", - "🇧🇿", - "🇨🇦", - "🇨🇨", - "🇨🇩", - "🇨🇫", - "🇨🇬", - "🇨🇭", - "🇨🇮", - "🇨🇰", - "🇨🇱", - "🇨🇲", - "🇨🇳", - "🇨🇴", - "🇨🇵", - "🇨🇷", - "🇨🇺", - "🇨🇻", - "🇨🇼", - "🇨🇽", - "🇨🇾", - "🇨🇿", - "🇩🇪", - "🇩🇬", - "🇩🇯", - "🇩🇰", - "🇩🇲", - "🇩🇴", - "🇩🇿", - "🇪🇦", - "🇪🇨", - "🇪🇪", - "🇪🇬", - "🇪🇭", - "🇪🇷", - "🇪🇸", - "🇪🇹", - "🇪🇺", - "🇫🇮", - "🇫🇯", - "🇫🇰", - "🇫🇲", - "🇫🇴", - "🇫🇷", - "🇬🇦", - "🇬🇧", - "🇬🇩", - "🇬🇪", - "🇬🇫", - "🇬🇬", - "🇬🇭", - 
"🇬🇮", - "🇬🇱", - "🇬🇲", - "🇬🇳", - "🇬🇵", - "🇬🇶", - "🇬🇷", - "🇬🇸", - "🇬🇹", - "🇬🇺", - "🇬🇼", - "🇬🇾", - "🇭🇰", - "🇭🇲", - "🇭🇳", - "🇭🇷", - "🇭🇹", - "🇭🇺", - "🇮🇨", - "🇮🇩", - "🇮🇪", - "🇮🇱", - "🇮🇲", - "🇮🇳", - "🇮🇴", - "🇮🇶", - "🇮🇷", - "🇮🇸", - "🇮🇹", - "🇯🇪", - "🇯🇲", - "🇯🇴", - "🇯🇵", - "🇰🇪", - "🇰🇬", - "🇰🇭", - "🇰🇮", - "🇰🇲", - "🇰🇳", - "🇰🇵", - "🇰🇷", - "🇰🇼", - "🇰🇾", - "🇰🇿", - "🇱🇦", - "🇱🇧", - "🇱🇨", - "🇱🇮", - "🇱🇰", - "🇱🇷", - "🇱🇸", - "🇱🇹", - "🇱🇺", - "🇱🇻", - "🇱🇾", - "🇲🇦", - "🇲🇨", - "🇲🇩", - "🇲🇪", - "🇲🇫", - "🇲🇬", - "🇲🇭", - "🇲🇰", - "🇲🇱", - "🇲🇲", - "🇲🇳", - "🇲🇴", - "🇲🇵", - "🇲🇶", - "🇲🇷", - "🇲🇸", - "🇲🇹", - "🇲🇺", - "🇲🇻", - "🇲🇼", - "🇲🇽", - "🇲🇾", - "🇲🇿", - "🇳🇦", - "🇳🇨", - "🇳🇪", - "🇳🇫", - "🇳🇬", - "🇳🇮", - "🇳🇱", - "🇳🇴", - "🇳🇵", - "🇳🇷", - "🇳🇺", - "🇳🇿", - "🇴🇲", - "🇵🇦", - "🇵🇪", - "🇵🇫", - "🇵🇬", - "🇵🇭", - "🇵🇰", - "🇵🇱", - "🇵🇲", - "🇵🇳", - "🇵🇷", - "🇵🇸", - "🇵🇹", - "🇵🇼", - "🇵🇾", - "🇶🇦", - "🇷🇪", - "🇷🇴", - "🇷🇸", - "🇷🇺", - "🇷🇼", - "🇸🇦", - "🇸🇧", - "🇸🇨", - "🇸🇩", - "🇸🇪", - "🇸🇬", - "🇸🇭", - "🇸🇮", - "🇸🇯", - "🇸🇰", - "🇸🇱", - "🇸🇲", - "🇸🇳", - "🇸🇴", - "🇸🇷", - "🇸🇸", - "🇸🇹", - "🇸🇻", - "🇸🇽", - "🇸🇾", - "🇸🇿", - "🇹🇦", - "🇹🇨", - "🇹🇩", - "🇹🇫", - "🇹🇬", - "🇹🇭", - "🇹🇯", - "🇹🇰", - "🇹🇱", - "🇹🇲", - "🇹🇳", - "🇹🇴", - "🇹🇷", - "🇹🇹", - "🇹🇻", - "🇹🇼", - "🇹🇿", - "🇺🇦", - "🇺🇬", - "🇺🇲", - "🇺🇳", - "🇺🇸", - "🇺🇾", - "🇺🇿", - "🇻🇦", - "🇻🇨", - "🇻🇪", - "🇻🇬", - "🇻🇮", - "🇻🇳", - "🇻🇺", - "🇼🇫", - "🇼🇸", - "🇽🇰", - "🇾🇪", - "🇾🇹", - "🇿🇦", - "🇿🇲", - "🇿🇼", + "animal": { + "🐵", "🐒", "🦍", "🦧", "🐶", "🐕", "🦮", "🐩", "🐺", "🦊", + "🦝", "🐱", "🐈", "🐈‍⬛", "🦁", "🐯", "🐅", "🐆", "🐴", "🐎", + "🦄", "🦓", "🦌", "🐮", "🐂", "🐃", "🐄", "🐷", "🐖", "🐗", + "🐽", "🐏", "🐑", "🐐", "🐪", "🐫", "🦙", "🦒", "🐘", "🦏", + "🦛", "🐭", "🐁", "🐀", "🐹", "🐰", "🐇", "🐿️", "🦔", "🦇", + "🐻", "🐻‍❄️", "🐨", "🐼", "🦥", "🦦", "🦨", "🦘", "🦡", "🐾", + "🦃", "🐔", "🐓", "🐣", "🐤", "🐥", "🐦", "🐧", "🕊️", "🦅", + "🦆", "🦢", "🦉", "🦩", "🦚", "🦜", "🐸", "🐊", "🐢", "🦎", + "🐍", "🐲", "🐉", "🦕", "🦖", "🐳", "🐋", "🐬", "🐟", "🐠", + "🐡", "🦈", "🐙", "🐚", "🐌", "🦋", "🐛", "🐜", "🐝", "🐞", + "🦗", "🕷️", "🕸️", "🦂", "🦟", "🦠", }, - "description": { - "grinning face", - "grinning face 
with big eyes", - "grinning face with smiling eyes", - "beaming face with smiling eyes", - "grinning squinting face", - "grinning face with sweat", - "rolling on the floor laughing", - "face with tears of joy", - "slightly smiling face", - "upside-down face", - "winking face", - "smiling face with smiling eyes", - "smiling face with halo", - "smiling face with hearts", - "smiling face with heart-eyes", - "star-struck", - "face blowing a kiss", - "kissing face", - "smiling face", - "kissing face with closed eyes", - "kissing face with smiling eyes", - "smiling face with tear", - "face savoring food", - "face with tongue", - "winking face with tongue", - "zany face", - "squinting face with tongue", - "money-mouth face", - "hugging face", - "face with hand over mouth", - "shushing face", - "thinking face", - "zipper-mouth face", - "face with raised eyebrow", - "neutral face", - "expressionless face", - "face without mouth", - "smirking face", - "unamused face", - "face with rolling eyes", - "grimacing face", - "lying face", - "relieved face", - "pensive face", - "sleepy face", - "drooling face", - "sleeping face", - "face with medical mask", - "face with thermometer", - "face with head-bandage", - "nauseated face", - "face vomiting", - "sneezing face", - "hot face", - "cold face", - "woozy face", - "dizzy face", - "exploding head", - "cowboy hat face", - "partying face", - "disguised face", - "smiling face with sunglasses", - "nerd face", - "face with monocle", - "confused face", - "worried face", - "slightly frowning face", - "frowning face", - "face with open mouth", - "hushed face", - "astonished face", - "flushed face", - "pleading face", - "frowning face with open mouth", - "anguished face", - "fearful face", - "anxious face with sweat", - "sad but relieved face", - "crying face", - "loudly crying face", - "face screaming in fear", - "confounded face", - "persevering face", - "disappointed face", - "downcast face with sweat", - "weary face", - "tired face", - 
"yawning face", - "face with steam from nose", - "pouting face", - "angry face", - "face with symbols on mouth", - "smiling face with horns", - "angry face with horns", - "skull", - "skull and crossbones", - "pile of poo", - "clown face", - "ogre", - "goblin", - "ghost", - "alien", - "alien monster", - "robot", - "grinning cat", - "grinning cat with smiling eyes", - "cat with tears of joy", - "smiling cat with heart-eyes", - "cat with wry smile", - "kissing cat", - "weary cat", - "crying cat", - "pouting cat", - "see-no-evil monkey", - "hear-no-evil monkey", - "speak-no-evil monkey", - "kiss mark", - "love letter", - "heart with arrow", - "heart with ribbon", - "sparkling heart", - "growing heart", - "beating heart", - "revolving hearts", - "two hearts", - "heart decoration", - "heart exclamation", - "broken heart", - "red heart", - "orange heart", - "yellow heart", - "green heart", - "blue heart", - "purple heart", - "brown heart", - "black heart", - "white heart", - "hundred points", - "anger symbol", - "collision", - "dizzy", - "sweat droplets", - "dashing away", - "hole", - "bomb", - "speech balloon", - "eye in speech bubble", - "left speech bubble", - "right anger bubble", - "thought balloon", - "zzz", - "waving hand", - "raised back of hand", - "hand with fingers splayed", - "raised hand", - "vulcan salute", - "OK hand", - "pinched fingers", - "pinching hand", - "victory hand", - "crossed fingers", - "love-you gesture", - "sign of the horns", - "call me hand", - "backhand index pointing left", - "backhand index pointing right", - "backhand index pointing up", - "middle finger", - "backhand index pointing down", - "index pointing up", - "thumbs up", - "thumbs down", - "raised fist", - "oncoming fist", - "left-facing fist", - "right-facing fist", - "clapping hands", - "raising hands", - "open hands", - "palms up together", - "handshake", - "folded hands", - "writing hand", - "nail polish", - "selfie", - "flexed biceps", - "mechanical arm", - "mechanical leg", - 
"leg", - "foot", - "ear", - "ear with hearing aid", - "nose", - "brain", - "anatomical heart", - "lungs", - "tooth", - "bone", - "eyes", - "eye", - "tongue", - "mouth", - "baby", - "child", - "boy", - "girl", - "person", - "person: blond hair", - "man", - "man: beard", - "man: red hair", - "man: curly hair", - "man: white hair", - "man: bald", - "woman", - "woman: red hair", - "person: red hair", - "woman: curly hair", - "person: curly hair", - "woman: white hair", - "person: white hair", - "woman: bald", - "person: bald", - "woman: blond hair", - "man: blond hair", - "older person", - "old man", - "old woman", - "person frowning", - "man frowning", - "woman frowning", - "person pouting", - "man pouting", - "woman pouting", - "person gesturing NO", - "man gesturing NO", - "woman gesturing NO", - "person gesturing OK", - "man gesturing OK", - "woman gesturing OK", - "person tipping hand", - "man tipping hand", - "woman tipping hand", - "person raising hand", - "man raising hand", - "woman raising hand", - "deaf person", - "deaf man", - "deaf woman", - "person bowing", - "man bowing", - "woman bowing", - "person facepalming", - "man facepalming", - "woman facepalming", - "person shrugging", - "man shrugging", - "woman shrugging", - "health worker", - "man health worker", - "woman health worker", - "student", - "man student", - "woman student", - "teacher", - "man teacher", - "woman teacher", - "judge", - "man judge", - "woman judge", - "farmer", - "man farmer", - "woman farmer", - "cook", - "man cook", - "woman cook", - "mechanic", - "man mechanic", - "woman mechanic", - "factory worker", - "man factory worker", - "woman factory worker", - "office worker", - "man office worker", - "woman office worker", - "scientist", - "man scientist", - "woman scientist", - "technologist", - "man technologist", - "woman technologist", - "singer", - "man singer", - "woman singer", - "artist", - "man artist", - "woman artist", - "pilot", - "man pilot", - "woman pilot", - "astronaut", 
- "man astronaut", - "woman astronaut", - "firefighter", - "man firefighter", - "woman firefighter", - "police officer", - "man police officer", - "woman police officer", - "detective", - "man detective", - "woman detective", - "guard", - "man guard", - "woman guard", - "ninja", - "construction worker", - "man construction worker", - "woman construction worker", - "prince", - "princess", - "person wearing turban", - "man wearing turban", - "woman wearing turban", - "person with skullcap", - "woman with headscarf", - "person in tuxedo", - "man in tuxedo", - "woman in tuxedo", - "person with veil", - "man with veil", - "woman with veil", - "pregnant woman", - "breast-feeding", - "woman feeding baby", - "man feeding baby", - "person feeding baby", - "baby angel", - "Santa Claus", - "Mrs. Claus", - "mx claus", - "superhero", - "man superhero", - "woman superhero", - "supervillain", - "man supervillain", - "woman supervillain", - "mage", - "man mage", - "woman mage", - "fairy", - "man fairy", - "woman fairy", - "vampire", - "man vampire", - "woman vampire", - "merperson", - "merman", - "mermaid", - "elf", - "man elf", - "woman elf", - "genie", - "man genie", - "woman genie", - "zombie", - "man zombie", - "woman zombie", - "person getting massage", - "man getting massage", - "woman getting massage", - "person getting haircut", - "man getting haircut", - "woman getting haircut", - "person walking", - "man walking", - "woman walking", - "person standing", - "man standing", - "woman standing", - "person kneeling", - "man kneeling", - "woman kneeling", - "person with white cane", - "man with white cane", - "woman with white cane", - "person in motorized wheelchair", - "man in motorized wheelchair", - "woman in motorized wheelchair", - "person in manual wheelchair", - "man in manual wheelchair", - "woman in manual wheelchair", - "person running", - "man running", - "woman running", - "woman dancing", - "man dancing", - "person in suit levitating", - "people with bunny ears", 
- "men with bunny ears", - "women with bunny ears", - "person in steamy room", - "man in steamy room", - "woman in steamy room", - "person climbing", - "man climbing", - "woman climbing", - "person fencing", - "horse racing", - "skier", - "snowboarder", - "person golfing", - "man golfing", - "woman golfing", - "person surfing", - "man surfing", - "woman surfing", - "person rowing boat", - "man rowing boat", - "woman rowing boat", - "person swimming", - "man swimming", - "woman swimming", - "person bouncing ball", - "man bouncing ball", - "woman bouncing ball", - "person lifting weights", - "man lifting weights", - "woman lifting weights", - "person biking", - "man biking", - "woman biking", - "person mountain biking", - "man mountain biking", - "woman mountain biking", - "person cartwheeling", - "man cartwheeling", - "woman cartwheeling", - "people wrestling", - "men wrestling", - "women wrestling", - "person playing water polo", - "man playing water polo", - "woman playing water polo", - "person playing handball", - "man playing handball", - "woman playing handball", - "person juggling", - "man juggling", - "woman juggling", - "person in lotus position", - "man in lotus position", - "woman in lotus position", - "person taking bath", - "person in bed", - "people holding hands", - "women holding hands", - "woman and man holding hands", - "men holding hands", - "kiss", - "kiss: woman, man", - "kiss: man, man", - "kiss: woman, woman", - "couple with heart", - "couple with heart: woman, man", - "couple with heart: man, man", - "couple with heart: woman, woman", - "family", - "family: man, woman, boy", - "family: man, woman, girl", - "family: man, woman, girl, boy", - "family: man, woman, boy, boy", - "family: man, woman, girl, girl", - "family: man, man, boy", - "family: man, man, girl", - "family: man, man, girl, boy", - "family: man, man, boy, boy", - "family: man, man, girl, girl", - "family: woman, woman, boy", - "family: woman, woman, girl", - "family: woman, 
woman, girl, boy", - "family: woman, woman, boy, boy", - "family: woman, woman, girl, girl", - "family: man, boy", - "family: man, boy, boy", - "family: man, girl", - "family: man, girl, boy", - "family: man, girl, girl", - "family: woman, boy", - "family: woman, boy, boy", - "family: woman, girl", - "family: woman, girl, boy", - "family: woman, girl, girl", - "speaking head", - "bust in silhouette", - "busts in silhouette", - "people hugging", - "footprints", - "monkey face", - "monkey", - "gorilla", - "orangutan", - "dog face", - "dog", - "guide dog", - "service dog", - "poodle", - "wolf", - "fox", - "raccoon", - "cat face", - "cat", - "black cat", - "lion", - "tiger face", - "tiger", - "leopard", - "horse face", - "horse", - "unicorn", - "zebra", - "deer", - "bison", - "cow face", - "ox", - "water buffalo", - "cow", - "pig face", - "pig", - "boar", - "pig nose", - "ram", - "ewe", - "goat", - "camel", - "two-hump camel", - "llama", - "giraffe", - "elephant", - "mammoth", - "rhinoceros", - "hippopotamus", - "mouse face", - "mouse", - "rat", - "hamster", - "rabbit face", - "rabbit", - "chipmunk", - "beaver", - "hedgehog", - "bat", - "bear", - "polar bear", - "koala", - "panda", - "sloth", - "otter", - "skunk", - "kangaroo", - "badger", - "paw prints", - "turkey", - "chicken", - "rooster", - "hatching chick", - "baby chick", - "front-facing baby chick", - "bird", - "penguin", - "dove", - "eagle", - "duck", - "swan", - "owl", - "dodo", - "feather", - "flamingo", - "peacock", - "parrot", - "frog", - "crocodile", - "turtle", - "lizard", - "snake", - "dragon face", - "dragon", - "sauropod", - "T-Rex", - "spouting whale", - "whale", - "dolphin", - "seal", - "fish", - "tropical fish", - "blowfish", - "shark", - "octopus", - "spiral shell", - "snail", - "butterfly", - "bug", - "ant", - "honeybee", - "beetle", - "lady beetle", - "cricket", - "cockroach", - "spider", - "spider web", - "scorpion", - "mosquito", - "fly", - "worm", - "microbe", - "bouquet", - "cherry blossom", 
- "white flower", - "rosette", - "rose", - "wilted flower", - "hibiscus", - "sunflower", - "blossom", - "tulip", - "seedling", - "potted plant", - "evergreen tree", - "deciduous tree", - "palm tree", - "cactus", - "sheaf of rice", - "herb", - "shamrock", - "four leaf clover", - "maple leaf", - "fallen leaf", - "leaf fluttering in wind", - "grapes", - "melon", - "watermelon", - "tangerine", - "lemon", - "banana", - "pineapple", - "mango", - "red apple", - "green apple", - "pear", - "peach", - "cherries", - "strawberry", - "blueberries", - "kiwi fruit", - "tomato", - "olive", - "coconut", - "avocado", - "eggplant", - "potato", - "carrot", - "ear of corn", - "hot pepper", - "bell pepper", - "cucumber", - "leafy green", - "broccoli", - "garlic", - "onion", - "mushroom", - "peanuts", - "chestnut", - "bread", - "croissant", - "baguette bread", - "flatbread", - "pretzel", - "bagel", - "pancakes", - "waffle", - "cheese wedge", - "meat on bone", - "poultry leg", - "cut of meat", - "bacon", - "hamburger", - "french fries", - "pizza", - "hot dog", - "sandwich", - "taco", - "burrito", - "tamale", - "stuffed flatbread", - "falafel", - "egg", - "cooking", - "shallow pan of food", - "pot of food", - "fondue", - "bowl with spoon", - "green salad", - "popcorn", - "butter", - "salt", - "canned food", - "bento box", - "rice cracker", - "rice ball", - "cooked rice", - "curry rice", - "steaming bowl", - "spaghetti", - "roasted sweet potato", - "oden", - "sushi", - "fried shrimp", - "fish cake with swirl", - "moon cake", - "dango", - "dumpling", - "fortune cookie", - "takeout box", - "crab", - "lobster", - "shrimp", - "squid", - "oyster", - "soft ice cream", - "shaved ice", - "ice cream", - "doughnut", - "cookie", - "birthday cake", - "shortcake", - "cupcake", - "pie", - "chocolate bar", - "candy", - "lollipop", - "custard", - "honey pot", - "baby bottle", - "glass of milk", - "hot beverage", - "teapot", - "teacup without handle", - "sake", - "bottle with popping cork", - "wine glass", 
- "cocktail glass", - "tropical drink", - "beer mug", - "clinking beer mugs", - "clinking glasses", - "tumbler glass", - "cup with straw", - "bubble tea", - "beverage box", - "mate", - "ice", - "chopsticks", - "fork and knife with plate", - "fork and knife", - "spoon", - "kitchen knife", - "amphora", - "globe showing Europe-Africa", - "globe showing Americas", - "globe showing Asia-Australia", - "globe with meridians", - "world map", - "map of Japan", - "compass", - "snow-capped mountain", - "mountain", - "volcano", - "mount fuji", - "camping", - "beach with umbrella", - "desert", - "desert island", - "national park", - "stadium", - "classical building", - "building construction", - "brick", - "rock", - "wood", - "hut", - "houses", - "derelict house", - "house", - "house with garden", - "office building", - "Japanese post office", - "post office", - "hospital", - "bank", - "hotel", - "love hotel", - "convenience store", - "school", - "department store", - "factory", - "Japanese castle", - "castle", - "wedding", - "Tokyo tower", - "Statue of Liberty", - "church", - "mosque", - "hindu temple", - "synagogue", - "shinto shrine", - "kaaba", - "fountain", - "tent", - "foggy", - "night with stars", - "cityscape", - "sunrise over mountains", - "sunrise", - "cityscape at dusk", - "sunset", - "bridge at night", - "hot springs", - "carousel horse", - "ferris wheel", - "roller coaster", - "barber pole", - "circus tent", - "locomotive", - "railway car", - "high-speed train", - "bullet train", - "train", - "metro", - "light rail", - "station", - "tram", - "monorail", - "mountain railway", - "tram car", - "bus", - "oncoming bus", - "trolleybus", - "minibus", - "ambulance", - "fire engine", - "police car", - "oncoming police car", - "taxi", - "oncoming taxi", - "automobile", - "oncoming automobile", - "sport utility vehicle", - "pickup truck", - "delivery truck", - "articulated lorry", - "tractor", - "racing car", - "motorcycle", - "motor scooter", - "manual wheelchair", - 
"motorized wheelchair", - "auto rickshaw", - "bicycle", - "kick scooter", - "skateboard", - "roller skate", - "bus stop", - "motorway", - "railway track", - "oil drum", - "fuel pump", - "police car light", - "horizontal traffic light", - "vertical traffic light", - "stop sign", - "construction", - "anchor", - "sailboat", - "canoe", - "speedboat", - "passenger ship", - "ferry", - "motor boat", - "ship", - "airplane", - "small airplane", - "airplane departure", - "airplane arrival", - "parachute", - "seat", - "helicopter", - "suspension railway", - "mountain cableway", - "aerial tramway", - "satellite", - "rocket", - "flying saucer", - "bellhop bell", - "luggage", - "hourglass done", - "hourglass not done", - "watch", - "alarm clock", - "stopwatch", - "timer clock", - "mantelpiece clock", - "twelve o’clock", - "twelve-thirty", - "one o’clock", - "one-thirty", - "two o’clock", - "two-thirty", - "three o’clock", - "three-thirty", - "four o’clock", - "four-thirty", - "five o’clock", - "five-thirty", - "six o’clock", - "six-thirty", - "seven o’clock", - "seven-thirty", - "eight o’clock", - "eight-thirty", - "nine o’clock", - "nine-thirty", - "ten o’clock", - "ten-thirty", - "eleven o’clock", - "eleven-thirty", - "new moon", - "waxing crescent moon", - "first quarter moon", - "waxing gibbous moon", - "full moon", - "waning gibbous moon", - "last quarter moon", - "waning crescent moon", - "crescent moon", - "new moon face", - "first quarter moon face", - "last quarter moon face", - "thermometer", - "sun", - "full moon face", - "sun with face", - "ringed planet", - "star", - "glowing star", - "shooting star", - "milky way", - "cloud", - "sun behind cloud", - "cloud with lightning and rain", - "sun behind small cloud", - "sun behind large cloud", - "sun behind rain cloud", - "cloud with rain", - "cloud with snow", - "cloud with lightning", - "tornado", - "fog", - "wind face", - "cyclone", - "rainbow", - "closed umbrella", - "umbrella", - "umbrella with rain drops", - 
"umbrella on ground", - "high voltage", - "snowflake", - "snowman", - "snowman without snow", - "comet", - "fire", - "droplet", - "water wave", - "jack-o-lantern", - "Christmas tree", - "fireworks", - "sparkler", - "firecracker", - "sparkles", - "balloon", - "party popper", - "confetti ball", - "tanabata tree", - "pine decoration", - "Japanese dolls", - "carp streamer", - "wind chime", - "moon viewing ceremony", - "red envelope", - "ribbon", - "wrapped gift", - "reminder ribbon", - "admission tickets", - "ticket", - "military medal", - "trophy", - "sports medal", - "1st place medal", - "2nd place medal", - "3rd place medal", - "soccer ball", - "baseball", - "softball", - "basketball", - "volleyball", - "american football", - "rugby football", - "tennis", - "flying disc", - "bowling", - "cricket game", - "field hockey", - "ice hockey", - "lacrosse", - "ping pong", - "badminton", - "boxing glove", - "martial arts uniform", - "goal net", - "flag in hole", - "ice skate", - "fishing pole", - "diving mask", - "running shirt", - "skis", - "sled", - "curling stone", - "direct hit", - "yo-yo", - "kite", - "pool 8 ball", - "crystal ball", - "magic wand", - "nazar amulet", - "video game", - "joystick", - "slot machine", - "game die", - "puzzle piece", - "teddy bear", - "piñata", - "nesting dolls", - "spade suit", - "heart suit", - "diamond suit", - "club suit", - "chess pawn", - "joker", - "mahjong red dragon", - "flower playing cards", - "performing arts", - "framed picture", - "artist palette", - "thread", - "sewing needle", - "yarn", - "knot", - "glasses", - "sunglasses", - "goggles", - "lab coat", - "safety vest", - "necktie", - "t-shirt", - "jeans", - "scarf", - "gloves", - "coat", - "socks", - "dress", - "kimono", - "sari", - "one-piece swimsuit", - "briefs", - "shorts", - "bikini", - "woman’s clothes", - "purse", - "handbag", - "clutch bag", - "shopping bags", - "backpack", - "thong sandal", - "man’s shoe", - "running shoe", - "hiking boot", - "flat shoe", - 
"high-heeled shoe", - "woman’s sandal", - "ballet shoes", - "woman’s boot", - "crown", - "woman’s hat", - "top hat", - "graduation cap", - "billed cap", - "military helmet", - "rescue worker’s helmet", - "prayer beads", - "lipstick", - "ring", - "gem stone", - "muted speaker", - "speaker low volume", - "speaker medium volume", - "speaker high volume", - "loudspeaker", - "megaphone", - "postal horn", - "bell", - "bell with slash", - "musical score", - "musical note", - "musical notes", - "studio microphone", - "level slider", - "control knobs", - "microphone", - "headphone", - "radio", - "saxophone", - "accordion", - "guitar", - "musical keyboard", - "trumpet", - "violin", - "banjo", - "drum", - "long drum", - "mobile phone", - "mobile phone with arrow", - "telephone", - "telephone receiver", - "pager", - "fax machine", - "battery", - "electric plug", - "laptop", - "desktop computer", - "printer", - "keyboard", - "computer mouse", - "trackball", - "computer disk", - "floppy disk", - "optical disk", - "dvd", - "abacus", - "movie camera", - "film frames", - "film projector", - "clapper board", - "television", - "camera", - "camera with flash", - "video camera", - "videocassette", - "magnifying glass tilted left", - "magnifying glass tilted right", - "candle", - "light bulb", - "flashlight", - "red paper lantern", - "diya lamp", - "notebook with decorative cover", - "closed book", - "open book", - "green book", - "blue book", - "orange book", - "books", - "notebook", - "ledger", - "page with curl", - "scroll", - "page facing up", - "newspaper", - "rolled-up newspaper", - "bookmark tabs", - "bookmark", - "label", - "money bag", - "coin", - "yen banknote", - "dollar banknote", - "euro banknote", - "pound banknote", - "money with wings", - "credit card", - "receipt", - "chart increasing with yen", - "envelope", - "e-mail", - "incoming envelope", - "envelope with arrow", - "outbox tray", - "inbox tray", - "package", - "closed mailbox with raised flag", - "closed mailbox 
with lowered flag", - "open mailbox with raised flag", - "open mailbox with lowered flag", - "postbox", - "ballot box with ballot", - "pencil", - "black nib", - "fountain pen", - "pen", - "paintbrush", - "crayon", - "memo", - "briefcase", - "file folder", - "open file folder", - "card index dividers", - "calendar", - "tear-off calendar", - "spiral notepad", - "spiral calendar", - "card index", - "chart increasing", - "chart decreasing", - "bar chart", - "clipboard", - "pushpin", - "round pushpin", - "paperclip", - "linked paperclips", - "straight ruler", - "triangular ruler", - "scissors", - "card file box", - "file cabinet", - "wastebasket", - "locked", - "unlocked", - "locked with pen", - "locked with key", - "key", - "old key", - "hammer", - "axe", - "pick", - "hammer and pick", - "hammer and wrench", - "dagger", - "crossed swords", - "pistol", - "boomerang", - "bow and arrow", - "shield", - "carpentry saw", - "wrench", - "screwdriver", - "nut and bolt", - "gear", - "clamp", - "balance scale", - "white cane", - "link", - "chains", - "hook", - "toolbox", - "magnet", - "ladder", - "alembic", - "test tube", - "petri dish", - "dna", - "microscope", - "telescope", - "satellite antenna", - "syringe", - "drop of blood", - "pill", - "adhesive bandage", - "stethoscope", - "door", - "elevator", - "mirror", - "window", - "bed", - "couch and lamp", - "chair", - "toilet", - "plunger", - "shower", - "bathtub", - "mouse trap", - "razor", - "lotion bottle", - "safety pin", - "broom", - "basket", - "roll of paper", - "bucket", - "soap", - "toothbrush", - "sponge", - "fire extinguisher", - "shopping cart", - "cigarette", - "coffin", - "headstone", - "funeral urn", - "moai", - "placard", - "ATM sign", - "litter in bin sign", - "potable water", - "wheelchair symbol", - "men’s room", - "women’s room", - "restroom", - "baby symbol", - "water closet", - "passport control", - "customs", - "baggage claim", - "left luggage", - "warning", - "children crossing", - "no entry", - 
"prohibited", - "no bicycles", - "no smoking", - "no littering", - "non-potable water", - "no pedestrians", - "no mobile phones", - "no one under eighteen", - "radioactive", - "biohazard", - "up arrow", - "up-right arrow", - "right arrow", - "down-right arrow", - "down arrow", - "down-left arrow", - "left arrow", - "up-left arrow", - "up-down arrow", - "left-right arrow", - "right arrow curving left", - "left arrow curving right", - "right arrow curving up", - "right arrow curving down", - "clockwise vertical arrows", - "counterclockwise arrows button", - "BACK arrow", - "END arrow", - "ON! arrow", - "SOON arrow", - "TOP arrow", - "place of worship", - "atom symbol", - "om", - "star of David", - "wheel of dharma", - "yin yang", - "latin cross", - "orthodox cross", - "star and crescent", - "peace symbol", - "menorah", - "dotted six-pointed star", - "Aries", - "Taurus", - "Gemini", - "Cancer", - "Leo", - "Virgo", - "Libra", - "Scorpio", - "Sagittarius", - "Capricorn", - "Aquarius", - "Pisces", - "Ophiuchus", - "shuffle tracks button", - "repeat button", - "repeat single button", - "play button", - "fast-forward button", - "next track button", - "play or pause button", - "reverse button", - "fast reverse button", - "last track button", - "upwards button", - "fast up button", - "downwards button", - "fast down button", - "pause button", - "stop button", - "record button", - "eject button", - "cinema", - "dim button", - "bright button", - "antenna bars", - "vibration mode", - "mobile phone off", - "female sign", - "male sign", - "transgender symbol", - "multiply", - "plus", - "minus", - "divide", - "infinity", - "double exclamation mark", - "exclamation question mark", - "question mark", - "white question mark", - "white exclamation mark", - "exclamation mark", - "wavy dash", - "currency exchange", - "heavy dollar sign", - "medical symbol", - "recycling symbol", - "fleur-de-lis", - "trident emblem", - "name badge", - "Japanese symbol for beginner", - "hollow red 
circle", - "check mark button", - "check box with check", - "check mark", - "cross mark", - "cross mark button", - "curly loop", - "double curly loop", - "part alternation mark", - "eight-spoked asterisk", - "eight-pointed star", - "sparkle", - "copyright", - "registered", - "trade mark", - "keycap: #", - "keycap: *", - "keycap: 0", - "keycap: 1", - "keycap: 2", - "keycap: 3", - "keycap: 4", - "keycap: 5", - "keycap: 6", - "keycap: 7", - "keycap: 8", - "keycap: 9", - "keycap: 10", - "input latin uppercase", - "input latin lowercase", - "input numbers", - "input symbols", - "input latin letters", - "A button (blood type)", - "AB button (blood type)", - "B button (blood type)", - "CL button", - "COOL button", - "FREE button", - "information", - "ID button", - "circled M", - "NEW button", - "NG button", - "O button (blood type)", - "OK button", - "P button", - "SOS button", - "UP! button", - "VS button", - "Japanese “here” button", - "Japanese “service charge” button", - "Japanese “monthly amount” button", - "Japanese “not free of charge” button", - "Japanese “reserved” button", - "Japanese “bargain” button", - "Japanese “discount” button", - "Japanese “free of charge” button", - "Japanese “prohibited” button", - "Japanese “acceptable” button", - "Japanese “application” button", - "Japanese “passing grade” button", - "Japanese “vacancy” button", - "Japanese “congratulations” button", - "Japanese “secret” button", - "Japanese “open for business” button", - "Japanese “no vacancy” button", - "red circle", - "orange circle", - "yellow circle", - "green circle", - "blue circle", - "purple circle", - "brown circle", - "black circle", - "white circle", - "red square", - "orange square", - "yellow square", - "green square", - "blue square", - "purple square", - "brown square", - "black large square", - "white large square", - "black medium square", - "white medium square", - "black medium-small square", - "white medium-small square", - "black small square", - "white small 
square", - "large orange diamond", - "large blue diamond", - "small orange diamond", - "small blue diamond", - "red triangle pointed up", - "red triangle pointed down", - "diamond with a dot", - "radio button", - "white square button", - "black square button", - "chequered flag", - "triangular flag", - "crossed flags", - "black flag", - "white flag", - "rainbow flag", - "transgender flag", - "pirate flag", - "flag: Ascension Island", - "flag: Andorra", - "flag: United Arab Emirates", - "flag: Afghanistan", - "flag: Antigua & Barbuda", - "flag: Anguilla", - "flag: Albania", - "flag: Armenia", - "flag: Angola", - "flag: Antarctica", - "flag: Argentina", - "flag: American Samoa", - "flag: Austria", - "flag: Australia", - "flag: Aruba", - "flag: Åland Islands", - "flag: Azerbaijan", - "flag: Bosnia & Herzegovina", - "flag: Barbados", - "flag: Bangladesh", - "flag: Belgium", - "flag: Burkina Faso", - "flag: Bulgaria", - "flag: Bahrain", - "flag: Burundi", - "flag: Benin", - "flag: St. Barthélemy", - "flag: Bermuda", - "flag: Brunei", - "flag: Bolivia", - "flag: Caribbean Netherlands", - "flag: Brazil", - "flag: Bahamas", - "flag: Bhutan", - "flag: Bouvet Island", - "flag: Botswana", - "flag: Belarus", - "flag: Belize", - "flag: Canada", - "flag: Cocos (Keeling) Islands", - "flag: Congo - Kinshasa", - "flag: Central African Republic", - "flag: Congo - Brazzaville", - "flag: Switzerland", - "flag: Côte d’Ivoire", - "flag: Cook Islands", - "flag: Chile", - "flag: Cameroon", - "flag: China", - "flag: Colombia", - "flag: Clipperton Island", - "flag: Costa Rica", - "flag: Cuba", - "flag: Cape Verde", - "flag: Curaçao", - "flag: Christmas Island", - "flag: Cyprus", - "flag: Czechia", - "flag: Germany", - "flag: Diego Garcia", - "flag: Djibouti", - "flag: Denmark", - "flag: Dominica", - "flag: Dominican Republic", - "flag: Algeria", - "flag: Ceuta & Melilla", - "flag: Ecuador", - "flag: Estonia", - "flag: Egypt", - "flag: Western Sahara", - "flag: Eritrea", - "flag: Spain", - 
"flag: Ethiopia", - "flag: European Union", - "flag: Finland", - "flag: Fiji", - "flag: Falkland Islands", - "flag: Micronesia", - "flag: Faroe Islands", - "flag: France", - "flag: Gabon", - "flag: United Kingdom", - "flag: Grenada", - "flag: Georgia", - "flag: French Guiana", - "flag: Guernsey", - "flag: Ghana", - "flag: Gibraltar", - "flag: Greenland", - "flag: Gambia", - "flag: Guinea", - "flag: Guadeloupe", - "flag: Equatorial Guinea", - "flag: Greece", - "flag: South Georgia & South Sandwich Islands", - "flag: Guatemala", - "flag: Guam", - "flag: Guinea-Bissau", - "flag: Guyana", - "flag: Hong Kong SAR China", - "flag: Heard & McDonald Islands", - "flag: Honduras", - "flag: Croatia", - "flag: Haiti", - "flag: Hungary", - "flag: Canary Islands", - "flag: Indonesia", - "flag: Ireland", - "flag: Israel", - "flag: Isle of Man", - "flag: India", - "flag: British Indian Ocean Territory", - "flag: Iraq", - "flag: Iran", - "flag: Iceland", - "flag: Italy", - "flag: Jersey", - "flag: Jamaica", - "flag: Jordan", - "flag: Japan", - "flag: Kenya", - "flag: Kyrgyzstan", - "flag: Cambodia", - "flag: Kiribati", - "flag: Comoros", - "flag: St. Kitts & Nevis", - "flag: North Korea", - "flag: South Korea", - "flag: Kuwait", - "flag: Cayman Islands", - "flag: Kazakhstan", - "flag: Laos", - "flag: Lebanon", - "flag: St. Lucia", - "flag: Liechtenstein", - "flag: Sri Lanka", - "flag: Liberia", - "flag: Lesotho", - "flag: Lithuania", - "flag: Luxembourg", - "flag: Latvia", - "flag: Libya", - "flag: Morocco", - "flag: Monaco", - "flag: Moldova", - "flag: Montenegro", - "flag: St. 
Martin", - "flag: Madagascar", - "flag: Marshall Islands", - "flag: North Macedonia", - "flag: Mali", - "flag: Myanmar (Burma)", - "flag: Mongolia", - "flag: Macao SAR China", - "flag: Northern Mariana Islands", - "flag: Martinique", - "flag: Mauritania", - "flag: Montserrat", - "flag: Malta", - "flag: Mauritius", - "flag: Maldives", - "flag: Malawi", - "flag: Mexico", - "flag: Malaysia", - "flag: Mozambique", - "flag: Namibia", - "flag: New Caledonia", - "flag: Niger", - "flag: Norfolk Island", - "flag: Nigeria", - "flag: Nicaragua", - "flag: Netherlands", - "flag: Norway", - "flag: Nepal", - "flag: Nauru", - "flag: Niue", - "flag: New Zealand", - "flag: Oman", - "flag: Panama", - "flag: Peru", - "flag: French Polynesia", - "flag: Papua New Guinea", - "flag: Philippines", - "flag: Pakistan", - "flag: Poland", - "flag: St. Pierre & Miquelon", - "flag: Pitcairn Islands", - "flag: Puerto Rico", - "flag: Palestinian Territories", - "flag: Portugal", - "flag: Palau", - "flag: Paraguay", - "flag: Qatar", - "flag: Réunion", - "flag: Romania", - "flag: Serbia", - "flag: Russia", - "flag: Rwanda", - "flag: Saudi Arabia", - "flag: Solomon Islands", - "flag: Seychelles", - "flag: Sudan", - "flag: Sweden", - "flag: Singapore", - "flag: St. 
Helena", - "flag: Slovenia", - "flag: Svalbard & Jan Mayen", - "flag: Slovakia", - "flag: Sierra Leone", - "flag: San Marino", - "flag: Senegal", - "flag: Somalia", - "flag: Suriname", - "flag: South Sudan", - "flag: São Tomé & Príncipe", - "flag: El Salvador", - "flag: Sint Maarten", - "flag: Syria", - "flag: Eswatini", - "flag: Tristan da Cunha", - "flag: Turks & Caicos Islands", - "flag: Chad", - "flag: French Southern Territories", - "flag: Togo", - "flag: Thailand", - "flag: Tajikistan", - "flag: Tokelau", - "flag: Timor-Leste", - "flag: Turkmenistan", - "flag: Tunisia", - "flag: Tonga", - "flag: Turkey", - "flag: Trinidad & Tobago", - "flag: Tuvalu", - "flag: Taiwan", - "flag: Tanzania", - "flag: Ukraine", - "flag: Uganda", - "flag: U.S. Outlying Islands", - "flag: United Nations", - "flag: United States", - "flag: Uruguay", - "flag: Uzbekistan", - "flag: Vatican City", - "flag: St. Vincent & Grenadines", - "flag: Venezuela", - "flag: British Virgin Islands", - "flag: U.S. Virgin Islands", - "flag: Vietnam", - "flag: Vanuatu", - "flag: Wallis & Futuna", - "flag: Samoa", - "flag: Kosovo", - "flag: Yemen", - "flag: Mayotte", - "flag: South Africa", - "flag: Zambia", - "flag: Zimbabwe", - "flag: England", - "flag: Scotland", - "flag: Wales", + "food": { + "🍇", "🍈", "🍉", "🍊", "🍋", "🍌", "🍍", "🥭", "🍎", "🍏", + "🍐", "🍑", "🍒", "🍓", "🥝", "🍅", "🥥", "🥑", "🍆", "🥔", + "🥕", "🌽", "🌶️", "🥒", "🥬", "🥦", "🧄", "🧅", "🍄", "🥜", + "🌰", "🍞", "🥐", "🥖", "🥨", "🥯", "🥞", "🧇", "🧀", "🍖", + "🍗", "🥩", "🥓", "🍔", "🍟", "🍕", "🌭", "🥪", "🌮", "🌯", + "🥙", "🧆", "🥚", "🍳", "🥘", "🍲", "🥣", "🥗", "🍿", "🧈", + "🧂", "🥫", "🍱", "🍘", "🍙", "🍚", "🍛", "🍜", "🍝", "🍠", + "🍢", "🍣", "🍤", "🍥", "🥮", "🍡", "🥟", "🥠", "🥡", "🦀", + "🦞", "🦐", "🦑", "🦪", "🍦", "🍧", "🍨", "🍩", "🍪", "🎂", + "🍰", "🧁", "🥧", "🍫", "🍬", "🍭", "🍮", "🍯", "🍼", "🥛", + "☕", "🍵", "🍶", "🍾", "🍷", "🍸", "🍹", "🍺", "🍻", "🥂", + "🥃", "🥤", "🧃", "🧉", "🧊", + }, + "plant": { + "💐", "🌸", "💮", "🏵️", "🌹", "🥀", "🌺", "🌻", "🌼", "🌷", + "🌱", "🪴", "🌲", "🌳", "🌴", "🌵", "🌾", "🌿", "☘️", 
"🍀", + "🍁", "🍂", "🍃", + }, + "face": { + "😀", "😃", "😄", "😁", "😆", "😅", "🤣", "😂", "🙂", "🙃", "😉", "😊", "😇", "🥰", "😍", "🤩", + "😘", "😗", "☺️", "😚", "😙", "😋", "😛", "😜", "🤪", "😝", "🤑", "🤗", "🤭", "🤫", "🤔", "🤐", + "🤨", "😐", "😑", "😶", "😏", "😒", "🙄", "😬", "🤥", "😌", "😔", "😪", "🤤", "😴", "😷", "🤒", + "🤕", "🤢", "🤮", "🤧", "🥵", "🥶", "🥴", "😵", "🤯", "🤠", "🥳", "😎", "🤓", "🧐", "😕", "😟", + "🙁", "☹️", "😮", "😯", "😲", "😳", "🥺", "😦", "😧", "😨", "😰", "😥", "😢", "😭", "😱", "😖", + "😣", "😞", "😓", "😩", "😫", "🥱", "😤", "😡", "😠", "🤬", "😈", "👿", "💀", "☠️", "💩", "🤡", + "👹", "👺", "👻", "👽", "👾", "🤖", "😺", "😸", "😹", "😻", "😼", "😽", "🙀", "😿", "😾", "🙈", + "🙉", "🙊", + }, + "hand": { + "👋", "🤚", "🖐️", "✋", "🖖", "👌", "🤏", "✌️", "🤞", "🤟", "🤘", "🤙", + "👈", "👉", "👆", "🖕", "👇", "☝️", "👍", "👎", "✊", "👊", "🤛", "🤜", + "👏", "🙌", "👐", "🤲", "🤝", "🙏", "✍️", "💅", "🤳", "💪", + }, + "clothing": { + "🧵", "🧶", "👓", "🕶️", "🥽", "🥼", "🦺", "👔", "👕", "👖", + "🧣", "🧤", "🧥", "🧦", "👗", "👘", "🥻", "🩱", "🩲", "🩳", + "👙", "👚", "👛", "👜", "👝", "🛍️", "🎒", "👞", "👟", "🥾", + "🥿", "👠", "👡", "🩰", "👢", "👑", "👒", "🎩", "🎓", "🧢", + "⛑️", "📿", "💄", "💍", "💎", + }, + "landmark": { + "🗺️", "🗾", "🧭", "🏔️", "⛰️", "🌋", "🗻", "🏕️", "🏖️", "🏜️", "🏝️", + "🏟️", "🏛️", "🏗️", "🧱", "🏘️", "🏚️", "🏠", "🏡", "🏢", "🏣", "🏤", "🏥", + "🏦", "🏨", "🏩", "🏪", "🏫", "🏬", "🏭", "🏯", "🏰", "💒", "🗼", "🗽", + "⛪", "🕌", "🛕", "🕍", "⛩️", "🕋", "⛲", "⛺", "🎠", "🎡", "🎢", "💈", "🎪", + }, + "electronics": { + "📱", "📲", "☎️", "📞", "📟", "📠", "🔋", "🔌", "💻", "🖥️", "🖨️", + "⌨️", "🖱️", "🖲️", "💽", "💾", "💿", "📀", "🧮", "🎥", "🎞️", "📽️", + "🎬", "📺", "📷", "📸", "📹", "📼", "📡", + }, + "game": { + "🔮", "🧿", "🎮", "🕹️", "🎰", "🎲", "🧩", "🧸", "♠️", "♥️", "♦️", + "♣️", "♟️", "🃏", "🀄", "🎴", + }, + "tools": { + "🔨", "🪓", "⛏️", "⚒️", "🛠️", "🗡️", "⚔️", "🔫", "🏹", "🛡️", + "🔧", "🔩", "⚙️", "🗜️", "⚖️", "🦯", "🔗", "⛓️", "🧰", "🧲", + }, + "weather": { + "🌙", "🌚", "🌛", "🌡️", "☀️", "🌝", "🌞", "🪐", "⭐", "☁️", "⛅", + "⛈️", "🌤️", "🌥️", "🌦️", "🌧️", "🌨️", "🌩️", "🌪️", "🌫️", "🌬️", "🌀", "🌈", + }, + "job": { + "🧑‍⚕️", "👨‍⚕️", "👩‍⚕️", "🧑‍🎓", "👨‍🎓", 
"👩‍🎓", "🧑‍🏫", "👨‍🏫", "👩‍🏫", + "🧑‍⚖️", "👨‍⚖️", "👩‍⚖️", "🧑‍🌾", "👨‍🌾", "👩‍🌾", "🧑‍🍳", "👨‍🍳", "👩‍🍳", + "🧑‍🔧", "👨‍🔧", "👩‍🔧", "🧑‍🏭", "👨‍🏭", "👩‍🏭", "🧑‍💼", "👨‍💼", "👩‍💼", + "🧑‍🔬", "👨‍🔬", "👩‍🔬", "🧑‍💻", "👨‍💻", "👩‍💻", "🧑‍🎤", "👨‍🎤", "👩‍🎤", + "🧑‍🎨", "👨‍🎨", "👩‍🎨", "🧑‍✈️", "👨‍✈️", "👩‍✈️", "🧑‍🚀", "👨‍🚀", "👩‍🚀", + "🧑‍🚒", "👨‍🚒", "👩‍🚒", "👮", "👮‍♂️", "👮‍♀️", "🕵️", "💂", "💂‍♂️", "💂‍♀️", + "👷", "👷‍♂️", "👷‍♀️", + }, + "person": { + "👶", "🧒", "👦", "👧", "🧑", "👱", "👨", "🧔", "👨‍🦰", "👨‍🦱", "👨‍🦳", "👨‍🦲", + "👩", "👩‍🦰", "🧑‍🦰", "👩‍🦱", "🧑‍🦱", "👩‍🦳", "🧑‍🦳", "👩‍🦲", "🧑‍🦲", "👱‍♀️", "👱‍♂️", + "🧓", "👴", "👵", + }, + "gesture": { + "🙍", "🙍‍♂️", "🙍‍♀️", "🙎", "🙎‍♂️", "🙎‍♀️", "🙅", "🙅‍♂️", "🙅‍♀️", "🙆", "🙆‍♂️", "🙆‍♀️", + "💁", "💁‍♂️", "💁‍♀️", "🙋", "🙋‍♂️", "🙋‍♀️", "🧏", "🧏‍♂️", "🧏‍♀️", "🙇", "🙇‍♂️", "🙇‍♀️", + "🤦", "🤦‍♂️", "🤦‍♀️", "🤷", "🤷‍♂️", "🤷‍♀️", + }, + "costume": { + "🤴", "👸", "👳", "👳‍♂️", "👳‍♀️", "👲", "🧕", "🤵", "🤵‍♂️", "🤵‍♀️", "👰", "👰‍♂️", "👰‍♀️", + "🤰", "🤱", "👩‍🍼", "👨‍🍼", "🧑‍🍼", "👼", "🎅", "🤶", "🧑‍🎄", "🦸", "🦸‍♂️", "🦸‍♀️", + "🦹", "🦹‍♂️", "🦹‍♀️", "🧙", "🧙‍♂️", "🧙‍♀️", "🧚", "🧚‍♂️", "🧚‍♀️", "🧛", "🧛‍♂️", "🧛‍♀️", + "🧜", "🧜‍♂️", "🧜‍♀️", "🧝", "🧝‍♂️", "🧝‍♀️", "🧞", "🧞‍♂️", "🧞‍♀️", "🧟", "🧟‍♂️", "🧟‍♀️", + }, + "music": { + "🎼", "🎵", "🎶", "🎙️", "🎚️", "🎛️", "🎤", "🎧", "📻", "🎷", + "🎸", "🎹", "🎺", "🎻", "🪕", "🥁", + }, + "sport": { + "🏆", "🏅", "🥇", "🥈", "🥉", + "⚽", "⚾", "🥎", "🏀", "🏐", "🏈", "🏉", "🎾", "🥏", "🎳", + "🏏", "🏑", "🏒", "🥍", "🏓", "🏸", "🥊", "🥋", "🥅", "⛳", "⛸️", + "🎣", "🤿", "🎽", "🎿", "🛷", "🥌", "🎯", "🪀", "🪁", "🎱", + "🧗", "🧗‍♂️", "🧗‍♀️", "🤺", "🏇", "⛷️", "🏂", "🏌️", "🏄", + "🚣", "🚣‍♂️", "🚣‍♀️", "🏊", "⛹️", "🏋️", "🚴", "🚴‍♂️", "🚴‍♀️", + "🚵", "🚵‍♂️", "🚵‍♀️", "🤸", "🤸‍♂️", "🤸‍♀️", "🤼", "🤼‍♂️", "🤼‍♀️", + "🤽", "🤽‍♂️", "🤽‍♀️", "🤾", "🤾‍♂️", "🤾‍♀️", "🧘", "🧘‍♂️", "🧘‍♀️", + }, + "vehicle": { + "🚂", "🚃", "🚄", "🚅", "🚆", "🚇", "🚈", "🚉", "🚊", "🚝", + "🚞", "🚋", "🚌", "🚍", "🚎", "🚐", "🚑", "🚒", "🚓", "🚔", + "🚕", "🚖", "🚗", "🚘", "🚙", "🚚", "🚛", "🚜", "🏎️", "🏍️", + "🛵", "🦽", "🦼", "🛺", "🚲", "🛴", "🛹", "⛵", "🛶", "🚤", + "🛳️", "⛴️", "🛥️", "🚢", "✈️", "🛩️", "🛫", "🛬", 
"🚁", "🚟", + "🚠", "🚡", "🚀", "🛸", + }, + "flag": { + "🏴", "🏳️", "🏴‍☠️", + "🇦🇨", "🇦🇩", "🇦🇪", "🇦🇫", "🇦🇬", "🇦🇮", "🇦🇱", "🇦🇲", "🇦🇴", "🇦🇶", + "🇦🇷", "🇦🇸", "🇦🇹", "🇦🇺", "🇦🇼", "🇦🇽", "🇦🇿", "🇧🇦", "🇧🇧", "🇧🇩", + "🇧🇪", "🇧🇫", "🇧🇬", "🇧🇭", "🇧🇮", "🇧🇯", "🇧🇱", "🇧🇲", "🇧🇳", "🇧🇴", + "🇧🇶", "🇧🇷", "🇧🇸", "🇧🇹", "🇧🇻", "🇧🇼", "🇧🇾", "🇧🇿", "🇨🇦", "🇨🇨", + "🇨🇩", "🇨🇫", "🇨🇬", "🇨🇭", "🇨🇮", "🇨🇰", "🇨🇱", "🇨🇲", "🇨🇳", "🇨🇴", + "🇨🇵", "🇨🇷", "🇨🇺", "🇨🇻", "🇨🇼", "🇨🇽", "🇨🇾", "🇨🇿", "🇩🇪", "🇩🇬", + "🇩🇯", "🇩🇰", "🇩🇲", "🇩🇴", "🇩🇿", "🇪🇦", "🇪🇨", "🇪🇪", "🇪🇬", "🇪🇭", + "🇪🇷", "🇪🇸", "🇪🇹", "🇪🇺", "🇫🇮", "🇫🇯", "🇫🇰", "🇫🇲", "🇫🇴", "🇫🇷", + "🇬🇦", "🇬🇧", "🇬🇩", "🇬🇪", "🇬🇫", "🇬🇬", "🇬🇭", "🇬🇮", "🇬🇱", "🇬🇲", + "🇬🇳", "🇬🇵", "🇬🇶", "🇬🇷", "🇬🇸", "🇬🇹", "🇬🇺", "🇬🇼", "🇬🇾", "🇭🇰", + "🇭🇲", "🇭🇳", "🇭🇷", "🇭🇹", "🇭🇺", "🇮🇨", "🇮🇩", "🇮🇪", "🇮🇱", "🇮🇲", + "🇮🇳", "🇮🇴", "🇮🇶", "🇮🇷", "🇮🇸", "🇮🇹", "🇯🇪", "🇯🇲", "🇯🇴", "🇯🇵", + "🇰🇪", "🇰🇬", "🇰🇭", "🇰🇮", "🇰🇲", "🇰🇳", "🇰🇵", "🇰🇷", "🇰🇼", "🇰🇾", + "🇰🇿", "🇱🇦", "🇱🇧", "🇱🇨", "🇱🇮", "🇱🇰", "🇱🇷", "🇱🇸", "🇱🇹", "🇱🇺", + "🇱🇻", "🇱🇾", "🇲🇦", "🇲🇨", "🇲🇩", "🇲🇪", "🇲🇫", "🇲🇬", "🇲🇭", "🇲🇰", + "🇲🇱", "🇲🇲", "🇲🇳", "🇲🇴", "🇲🇵", "🇲🇶", "🇲🇷", "🇲🇸", "🇲🇹", "🇲🇺", + "🇲🇻", "🇲🇼", "🇲🇽", "🇲🇾", "🇲🇿", "🇳🇦", "🇳🇨", "🇳🇪", "🇳🇫", "🇳🇬", + "🇳🇮", "🇳🇱", "🇳🇴", "🇳🇵", "🇳🇷", "🇳🇺", "🇳🇿", "🇴🇲", "🇵🇦", "🇵🇪", + "🇵🇫", "🇵🇬", "🇵🇭", "🇵🇰", "🇵🇱", "🇵🇲", "🇵🇳", "🇵🇷", "🇵🇸", "🇵🇹", + "🇵🇼", "🇵🇾", "🇶🇦", "🇷🇪", "🇷🇴", "🇷🇸", "🇷🇺", "🇷🇼", "🇸🇦", "🇸🇧", + "🇸🇨", "🇸🇩", "🇸🇪", "🇸🇬", "🇸🇭", "🇸🇮", "🇸🇯", "🇸🇰", "🇸🇱", "🇸🇲", + "🇸🇳", "🇸🇴", "🇸🇷", "🇸🇸", "🇸🇹", "🇸🇻", "🇸🇽", "🇸🇾", "🇸🇿", "🇹🇦", + "🇹🇨", "🇹🇩", "🇹🇫", "🇹🇬", "🇹🇭", "🇹🇯", "🇹🇰", "🇹🇱", "🇹🇲", "🇹🇳", + "🇹🇴", "🇹🇷", "🇹🇹", "🇹🇻", "🇹🇼", "🇹🇿", "🇺🇦", "🇺🇬", "🇺🇲", "🇺🇳", + "🇺🇸", "🇺🇾", "🇺🇿", "🇻🇦", "🇻🇨", "🇻🇪", "🇻🇬", "🇻🇮", "🇻🇳", "🇻🇺", + "🇼🇫", "🇼🇸", "🇽🇰", "🇾🇪", "🇾🇹", "🇿🇦", "🇿🇲", "🇿🇼", }, "category": { "Smileys & Emotion", @@ -3548,2302 +169,612 @@ var Emoji = map[string][]string{ "Flags", }, "alias": { - "grinning", - "smiley", - "smile", - "grin", - "laughing", - "satisfied", - "sweat_smile", - "rofl", - "joy", - "slightly_smiling_face", - "upside_down_face", - "wink", - "blush", - "innocent", - 
"smiling_face_with_three_hearts", - "heart_eyes", - "star_struck", - "kissing_heart", - "kissing", - "relaxed", - "kissing_closed_eyes", - "kissing_smiling_eyes", - "smiling_face_with_tear", - "yum", - "stuck_out_tongue", - "stuck_out_tongue_winking_eye", - "zany_face", - "stuck_out_tongue_closed_eyes", - "money_mouth_face", - "hugs", - "hand_over_mouth", - "shushing_face", - "thinking", - "zipper_mouth_face", - "raised_eyebrow", - "neutral_face", - "expressionless", - "no_mouth", - "smirk", - "unamused", - "roll_eyes", - "grimacing", - "lying_face", - "relieved", - "pensive", - "sleepy", - "drooling_face", - "sleeping", - "mask", - "face_with_thermometer", - "face_with_head_bandage", - "nauseated_face", - "vomiting_face", - "sneezing_face", - "hot_face", - "cold_face", - "woozy_face", - "dizzy_face", - "exploding_head", - "cowboy_hat_face", - "partying_face", - "disguised_face", - "sunglasses", - "nerd_face", - "monocle_face", - "confused", - "worried", - "slightly_frowning_face", - "frowning_face", - "open_mouth", - "hushed", - "astonished", - "flushed", - "pleading_face", - "frowning", - "anguished", - "fearful", - "cold_sweat", - "disappointed_relieved", - "cry", - "sob", - "scream", - "confounded", - "persevere", - "disappointed", - "sweat", - "weary", - "tired_face", - "yawning_face", - "triumph", - "rage", - "pout", - "angry", - "cursing_face", - "smiling_imp", - "imp", - "skull", - "skull_and_crossbones", - "hankey", - "poop", - "shit", - "clown_face", - "japanese_ogre", - "japanese_goblin", - "ghost", - "alien", - "space_invader", - "robot", - "smiley_cat", - "smile_cat", - "joy_cat", - "heart_eyes_cat", - "smirk_cat", - "kissing_cat", - "scream_cat", - "crying_cat_face", - "pouting_cat", - "see_no_evil", - "hear_no_evil", - "speak_no_evil", - "kiss", - "love_letter", - "cupid", - "gift_heart", - "sparkling_heart", - "heartpulse", - "heartbeat", - "revolving_hearts", - "two_hearts", - "heart_decoration", - "heavy_heart_exclamation", - "broken_heart", - 
"heart", - "orange_heart", - "yellow_heart", - "green_heart", - "blue_heart", - "purple_heart", - "brown_heart", - "black_heart", + // Faces and emotions + "grinning", "smiley", "smile", "grin", "laughing", + "satisfied", "sweat_smile", "rofl", "joy", "slightly_smiling_face", + "upside_down_face", "wink", "blush", "innocent", "smiling_face_with_three_hearts", + "heart_eyes", "star_struck", "kissing_heart", "kissing", "relaxed", + "kissing_closed_eyes", "kissing_smiling_eyes", "smiling_face_with_tear", "yum", "stuck_out_tongue", + "stuck_out_tongue_winking_eye", "zany_face", "stuck_out_tongue_closed_eyes", "money_mouth_face", "hugs", + "hand_over_mouth", "shushing_face", "thinking", "zipper_mouth_face", "raised_eyebrow", + "neutral_face", "expressionless", "no_mouth", "smirk", "unamused", + "roll_eyes", "grimacing", "lying_face", "relieved", "pensive", + "sleepy", "drooling_face", "sleeping", "mask", "face_with_thermometer", + "face_with_head_bandage", "nauseated_face", "vomiting_face", "sneezing_face", "hot_face", + "cold_face", "woozy_face", "dizzy_face", "exploding_head", "cowboy_hat_face", + "partying_face", "disguised_face", "sunglasses", "nerd_face", "monocle_face", + "confused", "worried", "slightly_frowning_face", "frowning_face", "open_mouth", + "hushed", "astonished", "flushed", "pleading_face", "frowning", + "anguished", "fearful", "cold_sweat", "disappointed_relieved", "cry", + "sob", "scream", "confounded", "persevere", "disappointed", + "sweat", "weary", "tired_face", "yawning_face", "triumph", + "rage", "pout", "angry", "cursing_face", + // Creatures and cat/monkey + "smiling_imp", "imp", "skull", "skull_and_crossbones", "hankey", + "poop", "shit", "clown_face", "japanese_ogre", "japanese_goblin", + "ghost", "alien", "space_invader", "robot", + // Cat faces + "smiley_cat", "smile_cat", "joy_cat", "heart_eyes_cat", "smirk_cat", + "kissing_cat", "scream_cat", "crying_cat_face", "pouting_cat", + // Three wise monkeys + "see_no_evil", "hear_no_evil", 
"speak_no_evil", + // Hearts and love symbols + "kiss", "love_letter", "cupid", "gift_heart", "sparkling_heart", + "heartpulse", "heartbeat", "revolving_hearts", "two_hearts", "heart_decoration", + "heavy_heart_exclamation", "broken_heart", "heart", "orange_heart", "yellow_heart", + "green_heart", "blue_heart", "purple_heart", "brown_heart", "black_heart", "white_heart", - "100", - "anger", - "boom", - "collision", - "dizzy", - "sweat_drops", - "dash", - "hole", - "bomb", - "speech_balloon", - "eye_speech_bubble", - "left_speech_bubble", - "right_anger_bubble", - "thought_balloon", - "zzz", - "wave", - "raised_back_of_hand", - "raised_hand_with_fingers_splayed", - "hand", - "raised_hand", - "vulcan_salute", - "ok_hand", - "pinched_fingers", - "pinching_hand", - "v", - "crossed_fingers", - "love_you_gesture", - "metal", - "call_me_hand", - "point_left", - "point_right", - "point_up_2", - "middle_finger", - "fu", - "point_down", - "point_up", - "+1", - "thumbsup", - "-1", - "thumbsdown", - "fist_raised", - "fist", - "fist_oncoming", - "facepunch", - "punch", - "fist_left", - "fist_right", - "clap", - "raised_hands", - "open_hands", - "palms_up_together", - "handshake", - "pray", - "writing_hand", - "nail_care", - "selfie", - "muscle", - "mechanical_arm", - "mechanical_leg", - "leg", - "foot", - "ear", - "ear_with_hearing_aid", - "nose", - "brain", - "anatomical_heart", - "lungs", - "tooth", - "bone", - "eyes", - "eye", - "tongue", - "lips", - "baby", - "child", - "boy", - "girl", - "adult", - "blond_haired_person", - "man", - "bearded_person", - "red_haired_man", - "curly_haired_man", - "white_haired_man", - "bald_man", - "woman", - "red_haired_woman", - "person_red_hair", - "curly_haired_woman", - "person_curly_hair", - "white_haired_woman", - "person_white_hair", - "bald_woman", - "person_bald", - "blond_haired_woman", - "blonde_woman", - "blond_haired_man", - "older_adult", - "older_man", - "older_woman", - "frowning_person", - "frowning_man", - "frowning_woman", 
- "pouting_face", - "pouting_man", - "pouting_woman", - "no_good", - "no_good_man", - "ng_man", - "no_good_woman", - "ng_woman", - "ok_person", - "ok_man", - "ok_woman", - "tipping_hand_person", - "information_desk_person", - "tipping_hand_man", - "sassy_man", - "tipping_hand_woman", - "sassy_woman", - "raising_hand", - "raising_hand_man", - "raising_hand_woman", - "deaf_person", - "deaf_man", - "deaf_woman", - "bow", - "bowing_man", - "bowing_woman", - "facepalm", - "man_facepalming", - "woman_facepalming", - "shrug", - "man_shrugging", - "woman_shrugging", - "health_worker", - "man_health_worker", - "woman_health_worker", - "student", - "man_student", - "woman_student", - "teacher", - "man_teacher", - "woman_teacher", - "judge", - "man_judge", - "woman_judge", - "farmer", - "man_farmer", - "woman_farmer", - "cook", - "man_cook", - "woman_cook", - "mechanic", - "man_mechanic", - "woman_mechanic", - "factory_worker", - "man_factory_worker", - "woman_factory_worker", - "office_worker", - "man_office_worker", - "woman_office_worker", - "scientist", - "man_scientist", - "woman_scientist", - "technologist", - "man_technologist", - "woman_technologist", - "singer", - "man_singer", - "woman_singer", - "artist", - "man_artist", - "woman_artist", - "pilot", - "man_pilot", - "woman_pilot", - "astronaut", - "man_astronaut", - "woman_astronaut", - "firefighter", - "man_firefighter", - "woman_firefighter", - "police_officer", - "cop", - "policeman", - "policewoman", - "detective", - "male_detective", - "female_detective", - "guard", - "guardsman", - "guardswoman", - "ninja", - "construction_worker", - "construction_worker_man", - "construction_worker_woman", - "prince", - "princess", - "person_with_turban", - "man_with_turban", - "woman_with_turban", - "man_with_gua_pi_mao", - "woman_with_headscarf", - "person_in_tuxedo", - "man_in_tuxedo", - "woman_in_tuxedo", - "person_with_veil", - "man_with_veil", - "woman_with_veil", - "bride_with_veil", - "pregnant_woman", - 
"breast_feeding", - "woman_feeding_baby", - "man_feeding_baby", - "person_feeding_baby", - "angel", - "santa", - "mrs_claus", - "mx_claus", - "superhero", - "superhero_man", - "superhero_woman", - "supervillain", - "supervillain_man", - "supervillain_woman", - "mage", - "mage_man", - "mage_woman", - "fairy", - "fairy_man", - "fairy_woman", - "vampire", - "vampire_man", - "vampire_woman", - "merperson", - "merman", - "mermaid", - "elf", - "elf_man", - "elf_woman", - "genie", - "genie_man", - "genie_woman", - "zombie", - "zombie_man", - "zombie_woman", - "massage", - "massage_man", - "massage_woman", - "haircut", - "haircut_man", - "haircut_woman", - "walking", - "walking_man", - "walking_woman", - "standing_person", - "standing_man", - "standing_woman", - "kneeling_person", - "kneeling_man", - "kneeling_woman", - "person_with_probing_cane", - "man_with_probing_cane", - "woman_with_probing_cane", - "person_in_motorized_wheelchair", - "man_in_motorized_wheelchair", - "woman_in_motorized_wheelchair", - "person_in_manual_wheelchair", - "man_in_manual_wheelchair", - "woman_in_manual_wheelchair", - "runner", - "running", - "running_man", - "running_woman", - "woman_dancing", - "dancer", - "man_dancing", - "business_suit_levitating", - "dancers", - "dancing_men", - "dancing_women", - "sauna_person", - "sauna_man", - "sauna_woman", - "climbing", - "climbing_man", - "climbing_woman", - "person_fencing", - "horse_racing", - "skier", - "snowboarder", - "golfing", - "golfing_man", - "golfing_woman", - "surfer", - "surfing_man", - "surfing_woman", - "rowboat", - "rowing_man", - "rowing_woman", - "swimmer", - "swimming_man", - "swimming_woman", - "bouncing_ball_person", - "bouncing_ball_man", - "basketball_man", - "bouncing_ball_woman", - "basketball_woman", - "weight_lifting", - "weight_lifting_man", - "weight_lifting_woman", - "bicyclist", - "biking_man", - "biking_woman", - "mountain_bicyclist", - "mountain_biking_man", - "mountain_biking_woman", - "cartwheeling", - 
"man_cartwheeling", - "woman_cartwheeling", - "wrestling", - "men_wrestling", - "women_wrestling", - "water_polo", - "man_playing_water_polo", - "woman_playing_water_polo", - "handball_person", - "man_playing_handball", - "woman_playing_handball", - "juggling_person", - "man_juggling", - "woman_juggling", - "lotus_position", - "lotus_position_man", - "lotus_position_woman", - "bath", - "sleeping_bed", - "people_holding_hands", - "two_women_holding_hands", - "couple", - "two_men_holding_hands", - "couplekiss", - "couplekiss_man_woman", - "couplekiss_man_man", - "couplekiss_woman_woman", - "couple_with_heart", - "couple_with_heart_woman_man", - "couple_with_heart_man_man", - "couple_with_heart_woman_woman", - "family", - "family_man_woman_boy", - "family_man_woman_girl", - "family_man_woman_girl_boy", - "family_man_woman_boy_boy", - "family_man_woman_girl_girl", - "family_man_man_boy", - "family_man_man_girl", - "family_man_man_girl_boy", - "family_man_man_boy_boy", - "family_man_man_girl_girl", - "family_woman_woman_boy", - "family_woman_woman_girl", - "family_woman_woman_girl_boy", - "family_woman_woman_boy_boy", - "family_woman_woman_girl_girl", - "family_man_boy", - "family_man_boy_boy", - "family_man_girl", - "family_man_girl_boy", - "family_man_girl_girl", - "family_woman_boy", - "family_woman_boy_boy", - "family_woman_girl", - "family_woman_girl_boy", - "family_woman_girl_girl", - "speaking_head", - "bust_in_silhouette", - "busts_in_silhouette", - "people_hugging", - "footprints", - "monkey_face", - "monkey", - "gorilla", - "orangutan", - "dog", - "dog2", - "guide_dog", - "service_dog", - "poodle", - "wolf", - "fox_face", - "raccoon", - "cat", - "cat2", - "black_cat", - "lion", - "tiger", - "tiger2", - "leopard", - "horse", - "racehorse", - "unicorn", - "zebra", - "deer", - "bison", - "cow", - "ox", - "water_buffalo", - "cow2", - "pig", - "pig2", - "boar", - "pig_nose", - "ram", - "sheep", - "goat", - "dromedary_camel", - "camel", - "llama", - "giraffe", - 
"elephant", - "mammoth", - "rhinoceros", - "hippopotamus", - "mouse", - "mouse2", - "rat", - "hamster", - "rabbit", - "rabbit2", - "chipmunk", - "beaver", - "hedgehog", - "bat", - "bear", - "polar_bear", - "koala", - "panda_face", - "sloth", - "otter", - "skunk", - "kangaroo", + // Symbols and comic effects + "100", "anger", "boom", "collision", "dizzy", + "sweat_drops", "dash", "hole", "bomb", "speech_balloon", + "eye_speech_bubble", "left_speech_bubble", "right_anger_bubble", "thought_balloon", "zzz", + // Hand gestures + "wave", "raised_back_of_hand", "raised_hand_with_fingers_splayed", "hand", "raised_hand", + "vulcan_salute", "ok_hand", "pinched_fingers", "pinching_hand", "v", + "crossed_fingers", "love_you_gesture", "metal", "call_me_hand", "point_left", + "point_right", "point_up_2", "middle_finger", "fu", "point_down", + "point_up", "+1", "thumbsup", "-1", "thumbsdown", + "fist_raised", "fist", "fist_oncoming", "facepunch", "punch", + "fist_left", "fist_right", "clap", "raised_hands", "open_hands", + "palms_up_together", "handshake", "pray", "writing_hand", "nail_care", "selfie", + // People variants and roles + "baby", "child", "boy", "girl", "adult", + "blond_haired_person", "man", "bearded_person", "red_haired_man", "curly_haired_man", + "white_haired_man", "bald_man", "woman", "red_haired_woman", "person_red_hair", + "person_curly_hair", "white_haired_woman", "person_white_hair", "person_bald", "blond_haired_woman", + "older_adult", "frowning_person", "pouting_face", "no_good", "ok_person", + "tipping_hand_person", "information_desk_person", + // Gestures and actions + "raising_hand", "deaf_person", "bow", "facepalm", "shrug", + // Occupations + "health_worker", "student", "teacher", "judge", "farmer", + "cook", "mechanic", "factory_worker", "office_worker", "scientist", + "technologist", "singer", "artist", "pilot", "astronaut", + "firefighter", "police_officer", "cop", "detective", "guard", + "ninja", "construction_worker", + // Costumes and fantasy + 
"prince", "princess", "person_with_turban", "person_in_tuxedo", "bride_with_veil", + "pregnant_woman", "woman_feeding_baby", "angel", "santa", "superhero", + "supervillain", "mage", "mage_man", "mage_woman", "fairy", + "vampire", "merperson", "elf", "genie", "zombie", + // Activities, sports, and relationships + "massage", "haircut", "walking", "standing_person", "kneeling_person", + "person_with_probing_cane", "person_in_motorized_wheelchair", "person_in_manual_wheelchair", "runner", "running", + "woman_dancing", "dancer", "man_dancing", "business_suit_levitating", "dancers", + "sauna_person", "climbing", "person_fencing", "horse_racing", "skier", + "snowboarder", "golfing", "surfer", "rowboat", "swimmer", + "bouncing_ball_person", "weight_lifting", "bicyclist", "mountain_bicyclist", "cartwheeling", + "man_cartwheeling", "woman_cartwheeling", "wrestling", "men_wrestling", "women_wrestling", + "water_polo", "man_playing_water_polo", "woman_playing_water_polo", "handball_person", "man_playing_handball", + "woman_playing_handball", "juggling_person", "man_juggling", "woman_juggling", "lotus_position", + "bath", "sleeping_bed", "people_holding_hands", "two_women_holding_hands", "couple", "family", + // People and silhouettes + "speaking_head", "bust_in_silhouette", "busts_in_silhouette", "people_hugging", "footprints", + // Mammals and land animals + "monkey_face", "monkey", "gorilla", "orangutan", "dog", + "guide_dog", "service_dog", "poodle", "wolf", "fox_face", + "raccoon", "cat", "black_cat", "lion", "tiger", + "leopard", "horse", "racehorse", "unicorn", "zebra", + "deer", "bison", "cow", "ox", "water_buffalo", + "pig", "boar", "pig_nose", "ram", "sheep", + "goat", "dromedary_camel", "camel", "llama", "giraffe", + "elephant", "mammoth", "rhinoceros", "hippopotamus", "mouse", + "rat", "hamster", "rabbit", "chipmunk", "beaver", + "hedgehog", "bat", "bear", "polar_bear", "koala", + "panda_face", "sloth", "otter", "skunk", "kangaroo", "badger", - "feet", - 
"paw_prints", - "turkey", - "chicken", - "rooster", - "hatching_chick", - "baby_chick", - "hatched_chick", - "bird", - "penguin", - "dove", - "eagle", - "duck", - "swan", - "owl", - "dodo", - "feather", - "flamingo", - "peacock", - "parrot", - "frog", - "crocodile", - "turtle", - "lizard", - "snake", - "dragon_face", - "dragon", - "sauropod", - "t-rex", - "whale", - "whale2", - "dolphin", - "flipper", - "seal", - "fish", - "tropical_fish", - "blowfish", - "shark", - "octopus", - "shell", - "snail", - "butterfly", - "bug", - "ant", - "bee", - "honeybee", - "beetle", - "lady_beetle", - "cricket", - "cockroach", - "spider", - "spider_web", - "scorpion", - "mosquito", - "fly", - "worm", - "microbe", - "bouquet", - "cherry_blossom", - "white_flower", - "rosette", - "rose", - "wilted_flower", - "hibiscus", - "sunflower", - "blossom", - "tulip", - "seedling", - "potted_plant", - "evergreen_tree", - "deciduous_tree", - "palm_tree", - "cactus", - "ear_of_rice", - "herb", - "shamrock", - "four_leaf_clover", - "maple_leaf", - "fallen_leaf", - "leaves", - "grapes", - "melon", - "watermelon", - "tangerine", - "orange", - "mandarin", - "lemon", - "banana", - "pineapple", - "mango", - "apple", - "green_apple", - "pear", - "peach", - "cherries", - "strawberry", - "blueberries", - "kiwi_fruit", - "tomato", - "olive", - "coconut", - "avocado", - "eggplant", - "potato", - "carrot", - "corn", - "hot_pepper", - "bell_pepper", - "cucumber", - "leafy_green", - "broccoli", - "garlic", - "onion", - "mushroom", - "peanuts", + // Birds and amphibians + "feet", "paw_prints", "turkey", "chicken", "rooster", + "hatching_chick", "baby_chick", "hatched_chick", "bird", "penguin", + "dove", "eagle", "duck", "swan", "owl", + "dodo", "feather", "flamingo", "peacock", "parrot", + "frog", "crocodile", + // Animals - reptiles, fish, invertebrates + "turtle", "lizard", "snake", "dragon_face", "dragon", + "sauropod", "t-rex", "whale", "whale2", "dolphin", + "flipper", "seal", "fish", "tropical_fish", 
"blowfish", + "shark", "octopus", "shell", "snail", "butterfly", + "bug", "ant", "bee", "honeybee", "beetle", + "lady_beetle", "cricket", "cockroach", "spider", "spider_web", + "scorpion", "mosquito", "fly", "worm", "microbe", + // Plants and nature + "bouquet", "cherry_blossom", "white_flower", "rosette", "rose", + "wilted_flower", "hibiscus", "sunflower", "blossom", "tulip", + "seedling", "potted_plant", "evergreen_tree", "deciduous_tree", "palm_tree", + "cactus", "ear_of_rice", "herb", "shamrock", "four_leaf_clover", + "maple_leaf", "fallen_leaf", "leaves", + // Food - fruits and vegetables + "grapes", "melon", "watermelon", "tangerine", "orange", + "mandarin", "lemon", "banana", "pineapple", "mango", + "apple", "green_apple", "pear", "peach", "cherries", + "strawberry", "blueberries", "kiwi_fruit", "tomato", "olive", + "coconut", "avocado", "eggplant", "potato", "carrot", + "corn", "hot_pepper", "bell_pepper", "cucumber", "leafy_green", + "broccoli", "garlic", "onion", "mushroom", "peanuts", "chestnut", - "bread", - "croissant", - "baguette_bread", - "flatbread", - "pretzel", - "bagel", - "pancakes", - "waffle", - "cheese", - "meat_on_bone", - "poultry_leg", - "cut_of_meat", - "bacon", - "hamburger", - "fries", - "pizza", - "hotdog", - "sandwich", - "taco", - "burrito", - "tamale", - "stuffed_flatbread", - "falafel", - "egg", - "fried_egg", - "shallow_pan_of_food", - "stew", - "fondue", - "bowl_with_spoon", - "green_salad", - "popcorn", - "butter", - "salt", - "canned_food", - "bento", - "rice_cracker", - "rice_ball", - "rice", - "curry", - "ramen", - "spaghetti", - "sweet_potato", - "oden", - "sushi", - "fried_shrimp", - "fish_cake", - "moon_cake", - "dango", - "dumpling", - "fortune_cookie", + // Food - staples and dishes + "bread", "croissant", "baguette_bread", "flatbread", "pretzel", + "bagel", "pancakes", "waffle", "cheese", "meat_on_bone", + "poultry_leg", "cut_of_meat", "bacon", "hamburger", "fries", + "pizza", "hotdog", "sandwich", "taco", "burrito", + 
"tamale", "stuffed_flatbread", "falafel", "egg", "fried_egg", + "shallow_pan_of_food", "stew", "fondue", "bowl_with_spoon", "green_salad", + "popcorn", "butter", "salt", "canned_food", "bento", + "rice_cracker", "rice_ball", "rice", "curry", "ramen", + "spaghetti", "sweet_potato", "oden", "sushi", "fried_shrimp", + "fish_cake", "moon_cake", "dango", "dumpling", "fortune_cookie", "takeout_box", - "crab", - "lobster", - "shrimp", - "squid", - "oyster", - "icecream", - "shaved_ice", - "ice_cream", - "doughnut", - "cookie", - "birthday", - "cake", - "cupcake", - "pie", - "chocolate_bar", - "candy", - "lollipop", - "custard", - "honey_pot", - "baby_bottle", - "milk_glass", - "coffee", - "teapot", - "tea", - "sake", - "champagne", - "wine_glass", - "cocktail", - "tropical_drink", - "beer", - "beers", - "clinking_glasses", - "tumbler_glass", - "cup_with_straw", - "bubble_tea", - "beverage_box", - "mate", - "ice_cube", - "chopsticks", - "plate_with_cutlery", - "fork_and_knife", - "spoon", - "hocho", - "knife", - "amphora", - "earth_africa", - "earth_americas", - "earth_asia", - "globe_with_meridians", - "world_map", - "japan", - "compass", - "mountain_snow", - "mountain", - "volcano", - "mount_fuji", - "camping", - "beach_umbrella", - "desert", - "desert_island", - "national_park", - "stadium", - "classical_building", - "building_construction", - "bricks", - "rock", - "wood", - "hut", - "houses", - "derelict_house", - "house", - "house_with_garden", - "office", - "post_office", - "european_post_office", - "hospital", - "bank", - "hotel", - "love_hotel", - "convenience_store", - "school", - "department_store", - "factory", - "japanese_castle", - "european_castle", - "wedding", - "tokyo_tower", - "statue_of_liberty", - "church", - "mosque", - "hindu_temple", - "synagogue", - "shinto_shrine", + // Food - seafood, desserts, beverages, utensils + "crab", "lobster", "shrimp", "squid", "oyster", + "icecream", "shaved_ice", "ice_cream", "doughnut", "cookie", + "birthday", "cake", 
"cupcake", "pie", "chocolate_bar", + "candy", "lollipop", "custard", "honey_pot", "baby_bottle", + "milk_glass", "coffee", "teapot", "tea", "sake", + "champagne", "wine_glass", "cocktail", "tropical_drink", "beer", + "beers", "clinking_glasses", "tumbler_glass", "cup_with_straw", "bubble_tea", + "beverage_box", "mate", "ice_cube", "chopsticks", "plate_with_cutlery", + "fork_and_knife", "spoon", "hocho", "knife", "amphora", + // Travel & Places - globe and maps + "earth_africa", "earth_americas", "earth_asia", "globe_with_meridians", "world_map", + "japan", "compass", + // Travel & Places - natural landscapes + "mountain_snow", "mountain", "volcano", "mount_fuji", "camping", + "beach_umbrella", "desert", "desert_island", "national_park", + // Travel & Places - landmarks and buildings + "stadium", "classical_building", "building_construction", "bricks", "rock", + "wood", "hut", "houses", "derelict_house", "house", + "house_with_garden", "office", "post_office", "european_post_office", "hospital", + "bank", "hotel", "love_hotel", "convenience_store", "school", + "department_store", "factory", "japanese_castle", "european_castle", "wedding", + "tokyo_tower", "statue_of_liberty", "church", "mosque", "hindu_temple", + "synagogue", "shinto_shrine", "kaaba", - "fountain", - "tent", - "foggy", - "night_with_stars", - "cityscape", - "sunrise_over_mountains", - "sunrise", - "city_sunset", - "city_sunrise", - "bridge_at_night", - "hotsprings", - "carousel_horse", - "ferris_wheel", - "roller_coaster", - "barber", - "circus_tent", - "steam_locomotive", - "railway_car", - "bullettrain_side", - "bullettrain_front", - "train2", - "metro", - "light_rail", - "station", - "tram", - "monorail", - "mountain_railway", - "train", - "bus", - "oncoming_bus", - "trolleybus", - "minibus", - "ambulance", - "fire_engine", - "police_car", - "oncoming_police_car", - "taxi", - "oncoming_taxi", - "car", - "red_car", - "oncoming_automobile", - "blue_car", - "pickup_truck", - "truck", - 
"articulated_lorry", - "tractor", - "racing_car", - "motorcycle", - "motor_scooter", - "manual_wheelchair", - "motorized_wheelchair", - "auto_rickshaw", - "bike", - "kick_scooter", - "skateboard", - "roller_skate", - "busstop", - "motorway", - "railway_track", - "oil_drum", - "fuelpump", - "rotating_light", - "traffic_light", - "vertical_traffic_light", - "stop_sign", - "construction", - "anchor", - "boat", - "sailboat", - "canoe", - "speedboat", - "passenger_ship", - "ferry", - "motor_boat", - "ship", - "airplane", - "small_airplane", - "flight_departure", - "flight_arrival", - "parachute", - "seat", - "helicopter", - "suspension_railway", - "mountain_cableway", - "aerial_tramway", - "artificial_satellite", - "rocket", - "flying_saucer", - "bellhop_bell", - "luggage", - "hourglass", - "hourglass_flowing_sand", - "watch", - "alarm_clock", - "stopwatch", - "timer_clock", - "mantelpiece_clock", - "clock12", - "clock1230", - "clock1", - "clock130", - "clock2", - "clock230", - "clock3", - "clock330", - "clock4", - "clock430", - "clock5", - "clock530", - "clock6", - "clock630", - "clock7", - "clock730", - "clock8", - "clock830", - "clock9", - "clock930", - "clock10", - "clock1030", - "clock11", - "clock1130", - "new_moon", - "waxing_crescent_moon", - "first_quarter_moon", - "moon", - "waxing_gibbous_moon", - "full_moon", - "waning_gibbous_moon", - "last_quarter_moon", - "waning_crescent_moon", - "crescent_moon", - "new_moon_with_face", - "first_quarter_moon_with_face", - "last_quarter_moon_with_face", - "thermometer", - "sunny", - "full_moon_with_face", - "sun_with_face", - "ringed_planet", - "star", - "star2", - "stars", - "milky_way", - "cloud", - "partly_sunny", - "cloud_with_lightning_and_rain", - "sun_behind_small_cloud", - "sun_behind_large_cloud", - "sun_behind_rain_cloud", - "cloud_with_rain", - "cloud_with_snow", - "cloud_with_lightning", - "tornado", - "fog", - "wind_face", - "cyclone", - "rainbow", - "closed_umbrella", - "open_umbrella", - "umbrella", - 
"parasol_on_ground", - "zap", - "snowflake", - "snowman_with_snow", - "snowman", - "comet", - "fire", - "droplet", - "ocean", - "jack_o_lantern", - "christmas_tree", - "fireworks", - "sparkler", - "firecracker", - "sparkles", - "balloon", - "tada", - "confetti_ball", - "tanabata_tree", - "bamboo", - "dolls", - "flags", - "wind_chime", - "rice_scene", - "red_envelope", - "ribbon", - "gift", - "reminder_ribbon", - "tickets", - "ticket", - "medal_military", - "trophy", - "medal_sports", - "1st_place_medal", - "2nd_place_medal", - "3rd_place_medal", - "soccer", - "baseball", - "softball", - "basketball", - "volleyball", - "football", - "rugby_football", - "tennis", - "flying_disc", - "bowling", - "cricket_game", - "field_hockey", - "ice_hockey", - "lacrosse", - "ping_pong", - "badminton", - "boxing_glove", - "martial_arts_uniform", - "goal_net", - "golf", - "ice_skate", - "fishing_pole_and_fish", - "diving_mask", - "running_shirt_with_sash", - "ski", - "sled", - "curling_stone", - "dart", - "yo_yo", - "kite", - "8ball", - "crystal_ball", - "magic_wand", - "nazar_amulet", - "video_game", - "joystick", - "slot_machine", - "game_die", - "jigsaw", - "teddy_bear", - "pi_ata", - "nesting_dolls", - "spades", - "hearts", - "diamonds", - "clubs", - "chess_pawn", - "black_joker", - "mahjong", - "flower_playing_cards", - "performing_arts", - "framed_picture", - "art", - "thread", - "sewing_needle", - "yarn", - "knot", - "eyeglasses", - "dark_sunglasses", - "goggles", - "lab_coat", - "safety_vest", - "necktie", - "shirt", - "tshirt", - "jeans", - "scarf", - "gloves", - "coat", - "socks", - "dress", - "kimono", - "sari", - "one_piece_swimsuit", - "swim_brief", - "shorts", - "bikini", - "womans_clothes", - "purse", - "handbag", - "pouch", - "shopping", - "school_satchel", - "thong_sandal", - "mans_shoe", - "shoe", - "athletic_shoe", - "hiking_boot", - "flat_shoe", - "high_heel", - "sandal", - "ballet_shoes", - "boot", - "crown", - "womans_hat", - "tophat", - "mortar_board", - 
"billed_cap", - "military_helmet", - "rescue_worker_helmet", - "prayer_beads", - "lipstick", - "ring", - "gem", - "mute", - "speaker", - "sound", - "loud_sound", - "loudspeaker", - "mega", - "postal_horn", - "bell", - "no_bell", - "musical_score", - "musical_note", - "notes", - "studio_microphone", - "level_slider", - "control_knobs", - "microphone", - "headphones", - "radio", - "saxophone", - "accordion", - "guitar", - "musical_keyboard", - "trumpet", - "violin", - "banjo", - "drum", - "long_drum", - "iphone", - "calling", - "phone", - "telephone", - "telephone_receiver", - "pager", - "fax", - "battery", - "electric_plug", - "computer", - "desktop_computer", - "printer", - "keyboard", - "computer_mouse", - "trackball", - "minidisc", - "floppy_disk", - "cd", - "dvd", - "abacus", - "movie_camera", - "film_strip", - "film_projector", - "clapper", - "tv", - "camera", - "camera_flash", - "video_camera", - "vhs", - "mag", - "mag_right", - "candle", - "bulb", - "flashlight", - "izakaya_lantern", - "lantern", - "diya_lamp", - "notebook_with_decorative_cover", - "closed_book", - "book", - "open_book", - "green_book", - "blue_book", - "orange_book", - "books", - "notebook", - "ledger", - "page_with_curl", - "scroll", - "page_facing_up", - "newspaper", - "newspaper_roll", - "bookmark_tabs", - "bookmark", - "label", - "moneybag", - "coin", - "yen", - "dollar", - "euro", - "pound", - "money_with_wings", - "credit_card", - "receipt", - "chart", - "email", - "envelope", - "e-mail", - "incoming_envelope", - "envelope_with_arrow", - "outbox_tray", - "inbox_tray", - "package", - "mailbox", - "mailbox_closed", - "mailbox_with_mail", - "mailbox_with_no_mail", - "postbox", - "ballot_box", - "pencil2", - "black_nib", - "fountain_pen", - "pen", - "paintbrush", - "crayon", - "memo", - "pencil", - "briefcase", - "file_folder", - "open_file_folder", - "card_index_dividers", - "date", - "calendar", - "spiral_notepad", - "spiral_calendar", - "card_index", - "chart_with_upwards_trend", - 
"chart_with_downwards_trend", - "bar_chart", - "clipboard", - "pushpin", - "round_pushpin", - "paperclip", - "paperclips", - "straight_ruler", - "triangular_ruler", - "scissors", - "card_file_box", - "file_cabinet", - "wastebasket", - "lock", - "unlock", - "lock_with_ink_pen", - "closed_lock_with_key", - "key", - "old_key", - "hammer", - "axe", - "pick", - "hammer_and_pick", - "hammer_and_wrench", - "dagger", - "crossed_swords", - "gun", - "boomerang", - "bow_and_arrow", - "shield", - "carpentry_saw", - "wrench", - "screwdriver", - "nut_and_bolt", - "gear", - "clamp", - "balance_scale", - "probing_cane", - "link", - "chains", - "hook", - "toolbox", - "magnet", - "ladder", - "alembic", - "test_tube", - "petri_dish", - "dna", - "microscope", - "telescope", - "satellite", - "syringe", - "drop_of_blood", - "pill", - "adhesive_bandage", - "stethoscope", - "door", - "elevator", - "mirror", - "window", - "bed", - "couch_and_lamp", - "chair", - "toilet", - "plunger", - "shower", - "bathtub", - "mouse_trap", - "razor", - "lotion_bottle", - "safety_pin", - "broom", - "basket", - "roll_of_paper", - "bucket", - "soap", - "toothbrush", - "sponge", - "fire_extinguisher", - "shopping_cart", - "smoking", - "coffin", - "headstone", - "funeral_urn", - "moyai", - "placard", - "atm", - "put_litter_in_its_place", - "potable_water", - "wheelchair", - "mens", - "womens", - "restroom", - "baby_symbol", - "wc", - "passport_control", - "customs", - "baggage_claim", - "left_luggage", - "warning", - "children_crossing", - "no_entry", - "no_entry_sign", - "no_bicycles", - "no_smoking", - "do_not_litter", - "non-potable_water", - "no_pedestrians", - "no_mobile_phones", - "underage", - "radioactive", - "biohazard", - "arrow_up", - "arrow_upper_right", - "arrow_right", - "arrow_lower_right", - "arrow_down", - "arrow_lower_left", - "arrow_left", - "arrow_upper_left", - "arrow_up_down", - "left_right_arrow", - "leftwards_arrow_with_hook", - "arrow_right_hook", - "arrow_heading_up", - 
"arrow_heading_down", - "arrows_clockwise", - "arrows_counterclockwise", - "back", - "end", - "on", - "soon", - "top", - "place_of_worship", - "atom_symbol", - "om", - "star_of_david", - "wheel_of_dharma", - "yin_yang", - "latin_cross", - "orthodox_cross", - "star_and_crescent", - "peace_symbol", - "menorah", - "six_pointed_star", - "aries", - "taurus", - "gemini", - "cancer", - "leo", - "virgo", - "libra", - "scorpius", - "sagittarius", - "capricorn", - "aquarius", - "pisces", - "ophiuchus", - "twisted_rightwards_arrows", - "repeat", - "repeat_one", - "arrow_forward", - "fast_forward", - "next_track_button", - "play_or_pause_button", - "arrow_backward", - "rewind", - "previous_track_button", - "arrow_up_small", - "arrow_double_up", - "arrow_down_small", - "arrow_double_down", - "pause_button", - "stop_button", - "record_button", - "eject_button", - "cinema", - "low_brightness", - "high_brightness", - "signal_strength", - "vibration_mode", - "mobile_phone_off", - "female_sign", - "male_sign", - "transgender_symbol", - "heavy_multiplication_x", - "heavy_plus_sign", - "heavy_minus_sign", - "heavy_division_sign", - "infinity", - "bangbang", - "interrobang", - "question", - "grey_question", - "grey_exclamation", - "exclamation", - "heavy_exclamation_mark", - "wavy_dash", - "currency_exchange", - "heavy_dollar_sign", - "medical_symbol", - "recycle", - "fleur_de_lis", - "trident", - "name_badge", - "beginner", - "o", - "white_check_mark", - "ballot_box_with_check", - "heavy_check_mark", - "x", - "negative_squared_cross_mark", - "curly_loop", - "loop", - "part_alternation_mark", - "eight_spoked_asterisk", - "eight_pointed_black_star", - "sparkle", - "copyright", - "registered", - "tm", - "hash", - "asterisk", - "zero", - "one", - "two", - "three", - "four", - "five", - "six", - "seven", - "eight", - "nine", - "keycap_ten", - "capital_abcd", - "abcd", - "1234", - "symbols", - "abc", - "a", - "ab", - "b", - "cl", - "cool", - "free", - "information_source", - "id", - "m", - 
"new", - "ng", - "o2", - "ok", - "parking", - "sos", - "up", - "vs", - "koko", - "sa", - "u6708", - "u6709", - "u6307", - "ideograph_advantage", - "u5272", - "u7121", - "u7981", - "accept", - "u7533", - "u5408", - "u7a7a", - "congratulations", - "secret", - "u55b6", - "u6e80", - "red_circle", - "orange_circle", - "yellow_circle", - "green_circle", - "large_blue_circle", - "purple_circle", - "brown_circle", - "black_circle", - "white_circle", - "red_square", - "orange_square", - "yellow_square", - "green_square", - "blue_square", - "purple_square", - "brown_square", - "black_large_square", - "white_large_square", - "black_medium_square", - "white_medium_square", - "black_medium_small_square", - "white_medium_small_square", - "black_small_square", - "white_small_square", - "large_orange_diamond", - "large_blue_diamond", - "small_orange_diamond", - "small_blue_diamond", - "small_red_triangle", - "small_red_triangle_down", - "diamond_shape_with_a_dot_inside", - "radio_button", - "white_square_button", - "black_square_button", - "checkered_flag", - "triangular_flag_on_post", - "crossed_flags", - "black_flag", - "white_flag", - "rainbow_flag", - "transgender_flag", - "pirate_flag", - "ascension_island", - "andorra", - "united_arab_emirates", - "afghanistan", - "antigua_barbuda", - "anguilla", - "albania", - "armenia", - "angola", - "antarctica", - "argentina", - "american_samoa", - "austria", - "australia", - "aruba", - "aland_islands", - "azerbaijan", - "bosnia_herzegovina", - "barbados", - "bangladesh", - "belgium", - "burkina_faso", - "bulgaria", - "bahrain", - "burundi", - "benin", - "st_barthelemy", - "bermuda", - "brunei", - "bolivia", - "caribbean_netherlands", - "brazil", - "bahamas", - "bhutan", - "bouvet_island", - "botswana", - "belarus", - "belize", - "canada", - "cocos_islands", - "congo_kinshasa", - "central_african_republic", - "congo_brazzaville", - "switzerland", - "cote_divoire", - "cook_islands", - "chile", - "cameroon", - "cn", - "colombia", - 
"clipperton_island", - "costa_rica", - "cuba", - "cape_verde", - "curacao", - "christmas_island", - "cyprus", - "czech_republic", - "de", - "diego_garcia", - "djibouti", - "denmark", - "dominica", - "dominican_republic", - "algeria", - "ceuta_melilla", - "ecuador", - "estonia", - "egypt", - "western_sahara", - "eritrea", - "es", - "ethiopia", - "eu", - "european_union", - "finland", - "fiji", - "falkland_islands", - "micronesia", - "faroe_islands", - "fr", - "gabon", - "gb", - "uk", - "grenada", - "georgia", - "french_guiana", - "guernsey", - "ghana", - "gibraltar", - "greenland", - "gambia", - "guinea", - "guadeloupe", - "equatorial_guinea", - "greece", - "south_georgia_south_sandwich_islands", - "guatemala", - "guam", - "guinea_bissau", - "guyana", - "hong_kong", - "heard_mcdonald_islands", - "honduras", - "croatia", - "haiti", - "hungary", - "canary_islands", - "indonesia", - "ireland", - "israel", - "isle_of_man", - "india", - "british_indian_ocean_territory", - "iraq", - "iran", - "iceland", - "it", - "jersey", - "jamaica", - "jordan", - "jp", - "kenya", - "kyrgyzstan", - "cambodia", - "kiribati", - "comoros", - "st_kitts_nevis", - "north_korea", - "kr", - "kuwait", - "cayman_islands", - "kazakhstan", - "laos", - "lebanon", - "st_lucia", - "liechtenstein", - "sri_lanka", - "liberia", - "lesotho", - "lithuania", - "luxembourg", - "latvia", - "libya", - "morocco", - "monaco", - "moldova", - "montenegro", - "st_martin", - "madagascar", - "marshall_islands", - "macedonia", - "mali", - "myanmar", - "mongolia", - "macau", - "northern_mariana_islands", - "martinique", - "mauritania", - "montserrat", - "malta", - "mauritius", - "maldives", - "malawi", - "mexico", - "malaysia", - "mozambique", - "namibia", - "new_caledonia", - "niger", - "norfolk_island", - "nigeria", - "nicaragua", - "netherlands", - "norway", - "nepal", - "nauru", - "niue", - "new_zealand", - "oman", - "panama", - "peru", - "french_polynesia", - "papua_new_guinea", - "philippines", - "pakistan", - 
"poland", - "st_pierre_miquelon", - "pitcairn_islands", - "puerto_rico", - "palestinian_territories", - "portugal", - "palau", - "paraguay", - "qatar", - "reunion", - "romania", - "serbia", - "ru", - "rwanda", - "saudi_arabia", - "solomon_islands", - "seychelles", - "sudan", - "sweden", - "singapore", - "st_helena", - "slovenia", - "svalbard_jan_mayen", - "slovakia", - "sierra_leone", - "san_marino", - "senegal", - "somalia", - "suriname", - "south_sudan", - "sao_tome_principe", - "el_salvador", - "sint_maarten", - "syria", - "swaziland", - "tristan_da_cunha", - "turks_caicos_islands", - "chad", - "french_southern_territories", - "togo", - "thailand", - "tajikistan", - "tokelau", - "timor_leste", - "turkmenistan", - "tunisia", - "tonga", - "tr", - "trinidad_tobago", - "tuvalu", - "taiwan", - "tanzania", - "ukraine", - "uganda", - "us_outlying_islands", - "united_nations", - "us", - "uruguay", - "uzbekistan", - "vatican_city", - "st_vincent_grenadines", - "venezuela", - "british_virgin_islands", - "us_virgin_islands", - "vietnam", - "vanuatu", - "wallis_futuna", - "samoa", - "kosovo", - "yemen", - "mayotte", - "south_africa", - "zambia", - "zimbabwe", - "england", - "scotland", - "wales", + // Travel scenes & attractions + "fountain", "tent", "foggy", "night_with_stars", "cityscape", + "sunrise_over_mountains", "sunrise", "city_sunset", "city_sunrise", "bridge_at_night", + "hotsprings", "carousel_horse", "ferris_wheel", "roller_coaster", "barber", "circus_tent", + // Rail transport + "steam_locomotive", "railway_car", "bullettrain_side", "bullettrain_front", "train2", + "metro", "light_rail", "station", "tram", "monorail", + "mountain_railway", "train", + // Road vehicles + "bus", "oncoming_bus", "trolleybus", "minibus", "ambulance", + "fire_engine", "police_car", "oncoming_police_car", "taxi", "oncoming_taxi", + "car", "red_car", "oncoming_automobile", "blue_car", "pickup_truck", + "truck", "articulated_lorry", "tractor", "racing_car", "motorcycle", 
"motor_scooter", + // Personal mobility + "manual_wheelchair", "motorized_wheelchair", "auto_rickshaw", "bike", "kick_scooter", + "skateboard", "roller_skate", + // Transport infrastructure + "busstop", "motorway", "railway_track", "oil_drum", "fuelpump", + "rotating_light", "traffic_light", "vertical_traffic_light", "stop_sign", "construction", + // Water transport + "anchor", "boat", "sailboat", "canoe", "speedboat", + "passenger_ship", "ferry", "motor_boat", "ship", + // Air and space travel + "airplane", "small_airplane", "flight_departure", "flight_arrival", "parachute", + "seat", "helicopter", "suspension_railway", "mountain_cableway", "aerial_tramway", + "artificial_satellite", "rocket", "flying_saucer", + // Hotel and timepieces + "bellhop_bell", "luggage", "hourglass", "hourglass_flowing_sand", "watch", + "alarm_clock", "stopwatch", "timer_clock", "mantelpiece_clock", "clock130", + // Moon phases + "new_moon", "waxing_crescent_moon", "first_quarter_moon", "moon", "waxing_gibbous_moon", + "full_moon", "waning_gibbous_moon", "last_quarter_moon", "waning_crescent_moon", "crescent_moon", + "new_moon_with_face", "first_quarter_moon_with_face", "last_quarter_moon_with_face", + // Weather and celestial + "thermometer", "sunny", "full_moon_with_face", "sun_with_face", "ringed_planet", + "star", "star2", "stars", "milky_way", "cloud", + "partly_sunny", "cloud_with_lightning_and_rain", "sun_behind_small_cloud", "sun_behind_large_cloud", "sun_behind_rain_cloud", + "cloud_with_rain", "cloud_with_snow", "cloud_with_lightning", "tornado", "fog", + "wind_face", "cyclone", "rainbow", "closed_umbrella", "open_umbrella", + "umbrella", "parasol_on_ground", "zap", "snowflake", "snowman_with_snow", + "snowman", "comet", "fire", "droplet", "ocean", + // Events and decorations + "jack_o_lantern", "christmas_tree", "fireworks", "sparkler", "firecracker", + "sparkles", "balloon", "tada", "confetti_ball", "tanabata_tree", + "bamboo", "dolls", "flags", "wind_chime", "rice_scene", + 
"red_envelope", "ribbon", "gift", "reminder_ribbon", "tickets", + "ticket", "medal_military", "trophy", "medal_sports", "1st_place_medal", + "2nd_place_medal", "3rd_place_medal", + // Sports + "soccer", "baseball", "softball", "basketball", "volleyball", + "football", "rugby_football", "tennis", "flying_disc", "bowling", + "cricket_game", "field_hockey", "ice_hockey", "lacrosse", "ping_pong", + "badminton", "boxing_glove", "martial_arts_uniform", "goal_net", "golf", + "ice_skate", "fishing_pole_and_fish", "diving_mask", "running_shirt_with_sash", "ski", + "sled", "curling_stone", "dart", "yo_yo", "kite", + // Games & leisure + "8ball", "crystal_ball", "magic_wand", "nazar_amulet", "video_game", + "joystick", "slot_machine", "game_die", "jigsaw", "teddy_bear", + "pi_ata", "nesting_dolls", + // Cards and board + "spades", "hearts", "diamonds", "clubs", "chess_pawn", + "black_joker", "mahjong", "flower_playing_cards", + // Arts & media + "performing_arts", "framed_picture", "art", + // Clothing & accessories + "thread", "sewing_needle", "yarn", "knot", "eyeglasses", + "dark_sunglasses", "goggles", "lab_coat", "safety_vest", "necktie", + "shirt", "tshirt", "jeans", "scarf", "gloves", + "coat", "socks", "dress", "kimono", "sari", + "one_piece_swimsuit", "swim_brief", "shorts", "bikini", "womans_clothes", + "purse", "handbag", "pouch", "shopping", "school_satchel", + "thong_sandal", "mans_shoe", "shoe", "athletic_shoe", "hiking_boot", + "flat_shoe", "high_heel", "sandal", "ballet_shoes", "boot", + "crown", "womans_hat", "tophat", "mortar_board", "billed_cap", + "military_helmet", "rescue_worker_helmet", "prayer_beads", "lipstick", "ring", "gem", + // Sound & music + "mute", "speaker", "sound", "loud_sound", "loudspeaker", + "mega", "postal_horn", "bell", "no_bell", "musical_score", + "musical_note", "notes", "studio_microphone", "level_slider", "control_knobs", + "microphone", "headphones", "radio", "saxophone", "accordion", + "guitar", "musical_keyboard", "trumpet", 
"violin", "banjo", + "drum", "long_drum", + // Electronics & media + "iphone", "calling", "phone", "telephone", "pager", + "fax", "battery", "electric_plug", "computer", "desktop_computer", + "printer", "keyboard", "computer_mouse", "trackball", "minidisc", + "floppy_disk", "cd", "dvd", "abacus", "movie_camera", + "film_strip", "film_projector", "clapper", "tv", "camera", + "camera_flash", "video_camera", "vhs", "mag", "mag_right", + // Lighting & stationary + "candle", "bulb", "flashlight", "izakaya_lantern", "lantern", "diya_lamp", + // Stationery and books + "notebook_with_decorative_cover", "closed_book", "book", "notebook", "ledger", + "page_with_curl", "scroll", "page_facing_up", "newspaper", "bookmark", "label", + // Money and office + "moneybag", "coin", "yen", "dollar", "euro", + "pound", "money_with_wings", "credit_card", "receipt", "chart", + // Mail and packages + "email", "envelope", "e-mail", "incoming_envelope", "envelope_with_arrow", + "outbox_tray", "inbox_tray", "package", "mailbox", "postbox", "ballot_box", + // Writing tools + "pencil2", "black_nib", "fountain_pen", "pen", "paintbrush", + "crayon", "memo", "pencil", + // Office items + "briefcase", "file_folder", "open_file_folder", "card_index_dividers", "date", + "calendar", "spiral_notepad", "spiral_calendar", "card_index", "chart_with_upwards_trend", + "chart_with_downwards_trend", "bar_chart", "clipboard", "pushpin", "round_pushpin", + "paperclip", "paperclips", "straight_ruler", "triangular_ruler", "scissors", + "card_file_box", "file_cabinet", "wastebasket", + // Locks and keys + "lock", "unlock", "lock_with_ink_pen", "closed_lock_with_key", "key", "old_key", + // Tools & weapons + "hammer", "axe", "pick", "hammer_and_pick", "hammer_and_wrench", + "dagger", "crossed_swords", "gun", "boomerang", "bow_and_arrow", + "shield", "carpentry_saw", "wrench", "screwdriver", "nut_and_bolt", + "gear", "clamp", "balance_scale", "probing_cane", "link", + "chains", "hook", "toolbox", "magnet", "ladder", 
+ // Science & lab + "alembic", "test_tube", "petri_dish", "dna", "microscope", + "telescope", "satellite", + // Medical + "syringe", "drop_of_blood", "pill", "adhesive_bandage", "stethoscope", + // Household & furniture + "door", "elevator", "mirror", "window", "bed", + "couch_and_lamp", "chair", "toilet", "plunger", "shower", + "bathtub", "mouse_trap", "razor", "lotion_bottle", "safety_pin", + "broom", "basket", "roll_of_paper", "bucket", "soap", + "toothbrush", "sponge", "fire_extinguisher", "shopping_cart", + // Signs & facilities + "smoking", "coffin", "headstone", "funeral_urn", "moyai", + "placard", "atm", "put_litter_in_its_place", "potable_water", "wheelchair", + "mens", "womens", "restroom", "baby_symbol", "passport_control", + "customs", "baggage_claim", "left_luggage", "warning", "children_crossing", + "underage", "radioactive", "biohazard", + // UI symbols + "back", "end", "on", "soon", "top", + // Religion & philosophy + "place_of_worship", "atom_symbol", "wheel_of_dharma", "yin_yang", "star_and_crescent", + "peace_symbol", "menorah", "six_pointed_star", + // Zodiac + "aries", "taurus", "gemini", "cancer", "leo", + "virgo", "libra", "scorpius", "sagittarius", "capricorn", + "aquarius", "pisces", "ophiuchus", + // Playback & device status + "twisted_rightwards_arrows", "repeat", "cinema", "low_brightness", "high_brightness", + "signal_strength", "vibration_mode", "mobile_phone_off", "infinity", "bangbang", "interrobang", }, "tag": { - "smile", - "happy", - "joy", - "haha", - "laugh", - "pleased", - "hot", - "lol", - "laughing", - "tears", - "flirt", - "proud", - "angel", - "love", - "crush", - "eyes", - "blush", - "tongue", - "lick", - "prank", - "silly", - "goofy", - "wacky", - "rich", - "quiet", - "whoops", - "silence", - "hush", - "suspicious", - "meh", - "mute", - "smug", - "liar", - "whew", - "tired", - "zzz", - "sick", - "ill", - "hurt", - "barf", - "disgusted", + // Emotions - positive and laughter + "smile", "happy", "joy", "haha", "laugh", + 
"pleased", "hot", "lol", "laughing", "tears", + "flirt", "proud", "angel", "love", "crush", + "eyes", "blush", "tongue", "lick", "prank", + "silly", "goofy", "wacky", + // Emotions - misc states + "rich", "quiet", "whoops", "silence", "hush", + "suspicious", "meh", "mute", "smug", "liar", + "whew", "tired", "zzz", + // Health - illness and discomfort + "sick", "ill", "hurt", "barf", "disgusted", "achoo", - "heat", - "sweating", - "freezing", - "ice", - "groggy", - "mind", - "blown", - "celebration", - "birthday", - "cool", - "geek", - "glasses", - "nervous", - "surprise", - "impressed", - "wow", - "speechless", - "amazed", - "gasp", - "puppy", - "stunned", - "scared", - "shocked", - "oops", - "phew", - "sweat", - "sad", - "tear", - "cry", - "bawling", - "horror", - "struggling", - "upset", - "whine", - "angry", - "mad", - "annoyed", - "foul", - "devil", - "evil", - "horns", - "dead", - "danger", - "poison", - "pirate", - "crap", - "monster", - "halloween", - "ufo", - "game", - "retro", - "monkey", - "blind", - "ignore", - "deaf", - "lipstick", - "email", - "envelope", - "heart", - "chocolates", - "score", - "perfect", - "explode", - "star", - "water", - "workout", - "wind", - "blow", - "fast", - "boom", - "comment", - "thinking", - "sleeping", - "goodbye", - "highfive", - "stop", - "prosper", - "spock", - "victory", - "peace", - "luck", - "hopeful", - "approve", - "ok", - "disapprove", - "bury", - "power", - "attack", - "praise", - "applause", - "hooray", - "deal", - "please", - "hope", - "wish", - "beauty", - "manicure", - "flex", - "bicep", - "strong", - "hear", - "sound", - "listen", - "smell", - "look", - "see", - "watch", - "taste", - "kiss", - "child", - "newborn", - "mustache", - "father", - "dad", - "girls", - "halt", - "denied", - "information", - "respect", - "thanks", - "doctor", - "nurse", - "graduation", - "school", - "professor", - "justice", - "chef", - "business", - "research", - "coder", - "rockstar", - "painter", - "space", - "law", - "cop", - 
"sleuth", - "helmet", - "crown", - "royal", - "hijab", - "groom", - "marriage", - "wedding", - "nursing", - "christmas", - "santa", - "wizard", - "spa", - "exercise", - "marathon", - "dress", - "dancer", - "bunny", - "steamy", - "bouldering", - "basketball", - "gym", - "meditation", - "shower", - "couple", - "date", - "home", - "parents", - "user", - "users", - "group", - "team", - "feet", - "tracks", - "pet", - "dog", - "speed", - "desert", - "thanksgiving", - "slow", - "dinosaur", - "sea", - "beach", - "bug", - "germ", - "flowers", - "flower", - "spring", - "plant", - "wood", - "canada", - "autumn", - "leaf", - "fruit", - "aubergine", - "spicy", - "toast", - "meat", - "chicken", - "burger", - "breakfast", - "paella", - "curry", - "noodle", - "pasta", - "tempura", - "party", - "dessert", - "sweet", - "milk", - "cafe", + // Temperature and exclamations + "heat", "sweating", "freezing", "ice", "groggy", + "mind", "blown", + // Reactions & surprise + "celebration", "birthday", "cool", "geek", "glasses", + "nervous", "surprise", "impressed", "wow", "speechless", + "amazed", "gasp", + // Reactions - fear/shock/sadness + "puppy", "stunned", "scared", "shocked", "oops", + "phew", "sweat", "sad", "tear", "cry", + "bawling", "horror", "struggling", "upset", "whine", + // Negative emotions & danger + "angry", "mad", "annoyed", "foul", "devil", + "evil", "horns", "dead", "danger", "poison", + // Characters & fun + "pirate", "crap", "monster", "halloween", "ufo", + // Misc reactions & concepts + "game", "retro", "monkey", "blind", "ignore", + "deaf", "lipstick", "email", "envelope", "heart", + "chocolates", "score", "perfect", "explode", "star", + "water", "workout", "wind", "blow", "fast", + "boom", "comment", "thinking", "sleeping", "goodbye", + "highfive", "stop", "prosper", "spock", "victory", + "peace", "luck", "hopeful", "approve", "ok", + "disapprove", "bury", "power", "attack", "praise", + "applause", "hooray", "deal", "please", "hope", + "wish", "beauty", "manicure", 
"flex", "bicep", + "strong", "hear", "sound", "listen", "smell", + "look", "see", "watch", "taste", "kiss", + "child", "newborn", "mustache", "father", "dad", + "girls", "halt", "denied", "information", "respect", + "thanks", "doctor", "nurse", "graduation", "school", + "professor", "justice", "chef", "business", "research", + "coder", "rockstar", "painter", "space", "law", + "cop", "sleuth", "helmet", "crown", "royal", + "hijab", "groom", "marriage", "wedding", "nursing", + "christmas", "santa", "wizard", "spa", "exercise", + "marathon", "dress", "dancer", "bunny", "steamy", + "bouldering", "basketball", "gym", "meditation", "shower", + "couple", "date", "home", "parents", "user", + "users", "group", "team", "feet", "tracks", + "pet", "dog", + // Nature & food + "wood", "canada", "autumn", "leaf", "fruit", + "aubergine", "spicy", "toast", "meat", "chicken", + "burger", "breakfast", "paella", "curry", "noodle", + "pasta", "tempura", + "flowers", "flower", "spring", "plant", "bug", + "germ", "dinosaur", + // Drinks & dining + "party", "dessert", "sweet", "milk", "cafe", "espresso", - "green", - "bottle", - "bubbly", - "drink", - "summer", - "vacation", - "drinks", - "cheers", - "whisky", - "dining", - "dinner", - "cutlery", - "cut", - "chop", - "globe", - "world", - "international", - "global", - "travel", - "camping", - "karl", - "skyline", - "train", - "bicycle", - "911", - "emergency", - "semaphore", - "wip", - "ship", - "cruise", - "flight", - "orbit", - "launch", - "time", - "morning", - "night", - "weather", - "cloud", - "swirl", - "rain", - "beach_umbrella", - "lightning", - "thunder", - "winter", - "cold", - "burn", - "festival", - "shiny", - "present", - "award", - "contest", - "winner", - "gold", - "silver", - "bronze", - "sports", - "skating", - "target", - "pool", - "billiards", - "fortune", - "play", - "controller", - "console", - "dice", - "gambling", - "theater", - "drama", - "design", - "paint", - "shirt", - "formal", - "pants", - "bag", - "bags", - 
"sneaker", - "sport", - "running", - "shoe", - "king", - "queen", - "hat", - "classy", - "education", - "college", - "university", - "makeup", - "engaged", + "green", "bottle", "bubbly", "drink", "summer", + "vacation", "drinks", "cheers", "whisky", "dining", + "dinner", "cutlery", "cut", "chop", + // Travel & world + "globe", "world", "international", "global", "travel", + "camping", "karl", "skyline", "train", "bicycle", + "911", "emergency", "semaphore", "wip", "ship", + "cruise", "flight", "orbit", "launch", "time", + "morning", "night", "weather", "cloud", "swirl", + "rain", "beach_umbrella", "lightning", "thunder", "winter", + "cold", "burn", "desert", "sea", "beach", + // Events & awards + "festival", "shiny", "present", "award", "contest", + "winner", "gold", "silver", "bronze", "thanksgiving", + // Sports & arts + "sports", "skating", "target", "pool", "billiards", + "fortune", "play", "controller", "console", "dice", + "gambling", "theater", "drama", "design", "paint", + // Apparel & style + "shirt", "formal", "pants", "bag", "bags", + "sneaker", "sport", "running", "shoe", "king", + "queen", "hat", "classy", + // Education & occasions + "education", "college", "university", "makeup", "engaged", "diamond", - "volume", - "announcement", - "notification", - "off", - "music", - "podcast", - "sing", - "earphones", - "rock", - "piano", - "smartphone", - "mobile", - "call", - "incoming", - "phone", - "desktop", - "screen", - "save", - "film", - "video", - "photo", - "search", - "zoom", - "idea", - "light", - "library", - "document", - "press", - "tag", - "dollar", - "cream", - "money", - "subscription", - "letter", - "shipping", - "note", - "directory", - "calendar", - "schedule", - "graph", - "metrics", - "stats", - "location", - "trash", - "security", - "private", - "lock", + // Audio & music + "volume", "announcement", "notification", "off", "music", + "podcast", "sing", "earphones", "rock", "piano", + // Devices, media & ideas + "smartphone", "mobile", 
"call", "incoming", "phone", + "desktop", "screen", "save", "film", "video", + "photo", "search", "zoom", "idea", "light", + "library", "document", "press", "tag", "dollar", + // Finance & logistics + "cream", "money", "subscription", "letter", "shipping", + "note", "directory", "calendar", "schedule", "graph", + "metrics", "stats", + // Location & security + "location", "trash", "security", "private", "lock", "password", - "tool", - "shoot", - "weapon", - "archery", - "science", - "laboratory", - "investigate", - "signal", - "health", - "hospital", - "needle", - "medicine", - "wc", - "bath", - "toilet", - "cigarette", - "funeral", - "stone", - "accessibility", - "restroom", - "airport", - "limit", - "block", - "forbidden", - "return", - "sync", - "shuffle", - "loop", - "movie", - "wifi", - "confused", - "bang", - "environment", - "trademark", - "number", - "letters", - "numbers", - "alphabet", - "fresh", - "yes", - "help", - "milestone", - "finish", - "pride", - "keeling", - "ivory", - "china", - "flag", - "germany", - "spain", - "france", - "french", - "british", - "italy", - "japan", - "korea", - "burma", - "russia", - "turkey", - "united", - "america", + // Tools & actions + "tool", "shoot", "weapon", "archery", "science", + "laboratory", "investigate", "signal", "health", "hospital", + "needle", "medicine", + // Facilities & warnings + "wc", "bath", "toilet", "cigarette", "funeral", + "stone", "accessibility", "restroom", "airport", "limit", + "block", "forbidden", "return", "sync", "shuffle", + "loop", "movie", "wifi", "confused", "bang", + // Misc tail + "environment", "trademark", "number", "letters", "numbers", + "alphabet", "fresh", "help", "milestone", "finish", + "keeling", "ivory", + }, + "sentence": { + // Micro / reactions (emoji varies in position) + "Nice {emoji}!", + "Mood {emoji}.", + "Same {emoji}.", + "Oof {emoji}.", + "Yikes {emoji}.", + "Winning! 
{emoji}", + "Noted {emoji}.", + "Done {emoji}.", + "Verified {emoji}.", + "Approved {emoji}.", + + // Short combos (start/middle/end mixed) + "Good morning {emoji}.", + "{emoji} Good night.", + "Let’s go {emoji}!", + "Nailed it {emoji}{emoji}.", + "Ship it {emoji}.", + "One more? {emoji}", + "Be right back {emoji}.", + "On my way {emoji}.", + "Almost there… {emoji}", + "Thank you! {emoji}", + + // Sequences with light prose + "{emoji} Big win today.", + "Coffee first {emoji} {emoji}.", + "Focus mode {emoji}.", + "Teamwork! {emoji} {emoji}", + "Weekend vibes {emoji}.", + "Deep work now {emoji}.", + "Daily standup time {emoji}.", + "Heads down {emoji}.", + "Let’s eat {emoji}.", + "Back online {emoji}.", + + // Declarative statements + "Progress looks {adjectivedescriptive} {emoji}.", + "Shipping the {nouncommon} today {emoji}.", + "Great energy in {city}! {emoji}", + "The {nouncommon} feels {adjectivedescriptive} {emoji}.", + "Small steps, steady pace {emoji}.", + "Clean code, clear minds {emoji}.", + "New {productfeature} behind a flag {emoji}.", + "Reduced noise, increased signal {emoji}.", + "Fewer bugs, happier users {emoji}.", + "Results landed in the dashboard {emoji}.", + + // Imperatives + "Keep it simple {emoji}.", + "Please review the PR {emoji}.", + "Take a quick break {emoji}.", + "Document the change {emoji}.", + "Test before you ship {emoji}.", + "Name things clearly {emoji}.", + "Measure what matters {emoji}.", + "Mind the latency budget {emoji}.", + "Protect the happy path {emoji}.", + "Celebrate small wins! 
{emoji}", + + // Questions + "Ready to roll {emoji}?", + "Any blockers {emoji}?", + "Does this scale {emoji}?", + "Who owns this {emoji}?", + "What’s next {emoji}?", + "Can we simplify {emoji}?", + "Is this necessary {emoji}?", + "What did we learn {emoji}?", + "Are we aligned {emoji}?", + "Time for lunch {emoji}?", + + // Status / standup style + "Today: {verbaction} the {nouncommon}, then review {emoji}.", + "Blocked by {nouncommon}; asking for help {emoji}.", + "Done: tests, docs, and cleanup {emoji}.", + "Next: rollout to {company} {emoji}.", + "Risks: unknown {nouncommon} under load {emoji}.", + "Win: latency down {number:5,60}% {emoji}.", + "FYI: feature flag toggled in {city} {emoji}.", + "Heads-up: deploy at {time} {emoji}.", + "Reminder: write the changelog {emoji}.", + "Note: {nouncommon} is now optional {emoji}.", + + // Food / break + "Coffee break! {emoji}", + "Hydrate and stretch {emoji}.", + "Quick snack, back soon {emoji}.", + "Lunch run—brb {emoji}.", + "Treat yourself today {emoji}.", + "Tea time, then tasks {emoji}.", + "Dinner after the deploy {emoji}.", + "Dessert to celebrate! {emoji}", + "Refuel and refocus {emoji}.", + "Brunch plans confirmed {emoji}.", + + // Fitness / wellness + "Breathe in, breathe out {emoji}.", + "Walk and think {emoji}.", + "Posture check {emoji}!", + "Micro break—eyes off screen {emoji}.", + "Stand, stretch, reset {emoji}.", + "Quick workout complete {emoji}.", + "Calm mind, sharp code {emoji}.", + "Water break—now {emoji}.", + "Sleep early tonight {emoji}.", + "Wellness first, always {emoji}.", + + // Celebration / gratitude + "Thank you, team! {emoji}", + "Amazing work today {emoji}!", + "Proud of this release {emoji}.", + "You crushed it {emoji}!", + "Confetti for the crew! 
{emoji}{emoji}", + "Another milestone reached {emoji}.", + "Great feedback from users {emoji}.", + "High-five across time zones {emoji}!", + "Shipped and shining {emoji}.", + "Champagne later {emoji}?", + + // Caution / alerts + "Heads-up: incidents possible {emoji}.", + "Careful with that change {emoji}.", + "Review the diff twice {emoji}.", + "Watch your step here {emoji}.", + "Rate limit enabled {emoji}.", + "Safeguards in place {emoji}.", + "Proceed with caution {emoji}.", + "Rollback plan ready {emoji}.", + "Pager is quiet—for now {emoji}.", + "Triage starts now {emoji}.", + + // Travel / logistics + "Boarding soon {emoji}.", + "Wheels up {emoji}.", + "Landing shortly {emoji}.", + "Gate changed—move! {emoji}", + "Bags on carousel {number:1,12} {emoji}.", + "Seat {number:3,28}{letter} {emoji}.", + "Terminal {number:1,8}, group {number:1,5} {emoji}.", + "Delayed, will update {emoji}.", + "Taxi to the hotel {emoji}.", + "Home safe {emoji}.", + + // Tiny stories (emoji sprinkled) + "Started small, grew fast {emoji}.", + "Found the bug, fixed it {emoji}.", + "Drew the map, took the path {emoji}.", + "Built, tested, shipped {emoji}.", + "Asked, listened, learned {emoji}.", + "Slowed down to speed up {emoji}.", + "Cut scope, kept quality {emoji}.", + "Weathered the storm {emoji}{emoji}.", + "Saved the day {emoji}.", + "Onward {emoji}.", + + // Labels / tags with emoji anchors + "Priority: High {emoji}.", + "Status: In progress {emoji}.", + "Owner: {firstname} {lastname} {emoji}.", + "Location: {city}, {country} {emoji}.", + "Category: {emojicategory} {emoji}.", + "Tag: #{emojitag} {emoji}.", + "Version: {appversion} {emoji}.", + "Channel: {company} {emoji}.", + "Ref: {uuid} {emoji}.", + "Due: {date} {emoji}.", + + // Mixed fun + "Plot twist: it worked {emoji}.", + "Calm is a superpower {emoji}.", + "Curiosity unlocked {emoji}.", + "Details matter {emoji}.", + "Less, but better {emoji}.", + "Momentum beats motivation {emoji}.", + "Consistency compounds {emoji}.", + 
"Kindness scales {emoji}.", + "Taste takes time {emoji}.", + "Simplicity wins {emoji}.", }, } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/hipster.go b/vendor/github.com/brianvoe/gofakeit/v7/data/hipster.go index f036f463..8f913ec5 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/hipster.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/hipster.go @@ -3,4 +3,117 @@ package data // Hipster consists of random hipster words var Hipster = map[string][]string{ "word": {"Wes Anderson", "chicharrones", "narwhal", "food truck", "marfa", "aesthetic", "keytar", "art party", "sustainable", "forage", "mlkshk", "gentrify", "locavore", "swag", "hoodie", "microdosing", "VHS", "before they sold out", "pabst", "plaid", "Thundercats", "freegan", "scenester", "hella", "occupy", "truffaut", "raw denim", "beard", "post-ironic", "photo booth", "twee", "90's", "pitchfork", "cray", "cornhole", "kale chips", "pour-over", "yr", "five dollar toast", "kombucha", "you probably haven't heard of them", "mustache", "fixie", "try-hard", "franzen", "kitsch", "austin", "stumptown", "keffiyeh", "whatever", "tumblr", "DIY", "shoreditch", "biodiesel", "vegan", "pop-up", "banjo", "kogi", "cold-pressed", "letterpress", "chambray", "butcher", "synth", "trust fund", "hammock", "farm-to-table", "intelligentsia", "loko", "ugh", "offal", "poutine", "gastropub", "Godard", "jean shorts", "sriracha", "dreamcatcher", "leggings", "fashion axe", "church-key", "meggings", "tote bag", "disrupt", "readymade", "helvetica", "flannel", "meh", "roof", "hashtag", "knausgaard", "cronut", "schlitz", "green juice", "waistcoat", "normcore", "viral", "ethical", "actually", "fingerstache", "humblebrag", "deep v", "wayfarers", "tacos", "taxidermy", "selvage", "put a bird on it", "ramps", "portland", "retro", "kickstarter", "bushwick", "brunch", "distillery", "migas", "flexitarian", "XOXO", "small batch", "messenger bag", "heirloom", "tofu", "bicycle rights", "bespoke", "salvia", "wolf", "selfies", "echo", 
"park", "listicle", "craft beer", "chartreuse", "sartorial", "pinterest", "mumblecore", "kinfolk", "vinyl", "etsy", "umami", "8-bit", "polaroid", "banh mi", "crucifix", "bitters", "brooklyn", "PBR&B", "drinking", "vinegar", "squid", "tattooed", "skateboard", "vice", "authentic", "literally", "lomo", "celiac", "health", "goth", "artisan", "chillwave", "blue bottle", "pickled", "next level", "neutra", "organic", "Yuccie", "paleo", "blog", "single-origin coffee", "seitan", "street", "gluten-free", "mixtape", "venmo", "irony", "everyday", "carry", "slow-carb", "3 wolf moon", "direct trade", "lo-fi", "tousled", "tilde", "semiotics", "cred", "chia", "master", "cleanse", "ennui", "quinoa", "pug", "iPhone", "fanny pack", "cliche", "cardigan", "asymmetrical", "meditation", "YOLO", "typewriter", "pork belly", "shabby chic", "+1", "lumbersexual", "williamsburg"}, + "sentence": { + // ultra-short / taglines + "{hipsterword} vibes only.", + "deeply {hipsterword}.", + "casually {hipsterword}.", + "endlessly {hipsterword}.", + "pure {hipsterword} energy.", + "very {hipsterword}, very {hipsterword}.", + "{hipsterword} meets {hipsterword}.", + "low-fi {hipsterword}.", + "post-{hipsterword} mood.", + "tastefully {hipsterword}.", + + // one-liners + "living that {hipsterword} life.", + "another day, another {hipsterword}.", + "strictly {hipsterword} palettes.", + "hand-picked {hipsterword}.", + "soft {hipsterword} glow in {city}.", + "from {city} with {hipsterword}.", + "born in {city}, raised on {hipsterword}.", + "powered by {hipsterword} and {hipsterword}.", + "mildly obsessed with {hipsterword}.", + "just add {hipsterword}.", + + // comparisons / mashups + "{hipsterword} meets {hipsterword} in {city}.", + "{hipsterword}, but make it {hipsterword}.", + "{hipsterword} × {hipsterword}, minimal edition.", + "{hipsterword} on the outside, {hipsterword} at heart.", + "somewhere between {hipsterword} and {hipsterword}.", + "{hipsterword} layered over {hipsterword}.", + "{hipsterword} 
with a hint of {hipsterword}.", + "heavy on {hipsterword}, light on {hipsterword}.", + "{hipsterword} > {hipsterword}, discuss.", + + // scene setters + "weekends are for {hipsterword} and {beerstyle}.", + "late nights, {hipsterword} playlists, {songgenre} loops.", + "mornings in {city}, afternoons in {hipsterword}.", + "slow afternoons, {hipsterword} everything.", + "back alley {hipsterword}, side-door {hipsterword}.", + "under neon {color}, pure {hipsterword}.", + "rooftop in {city}, whispering {hipsterword}.", + "{hipsterword} at golden hour in {city}.", + "between {hipsterword} stalls and {hipsterword} pop-ups.", + + // lifestyle / verbs + "{verb} through {hipsterword} alleys.", + "{verb} {noun} with {hipsterword} flair.", + "{verb} the {noun}, keep it {hipsterword}.", + "{verb} slowly; trust the {hipsterword}.", + "{verb} toward {hipsterword} minimalism.", + "{verb} and {verb}, always {hipsterword}.", + "let it be {hipsterword}, let it be {adjective}.", + + // craft / food / drink + "small-batch {hipsterword} in {city}.", + "single-origin {hipsterword}, double {adjective}.", + "farm-to-table {hipsterword} and {noun}.", + "house {hipsterword} with a {adjective} finish.", + "cold {hipsterword}, warm {hipsterword}.", + "locally sourced {hipsterword}, globally {adjective}.", + "{hipsterword} tasting notes: {adjective}, {adjective}, {adjective}.", + "pairing {hipsterword} with {beerstyle}.", + + // fashion / objects + "{color} threads, {hipsterword} cuts.", + "{hipsterword} layers over {adjective} basics.", + "{hipsterword} silhouettes, {adjective} textures.", + "{hipsterword} frames and {color} accents.", + "{hipsterword} tote with {noun} inside.", + "imperfect by design, perfectly {hipsterword}.", + + // travel / place + "lost in {city}, found in {hipsterword}.", + "passport full of {hipsterword}.", + "{hipsterword} maps and {language} menus.", + "backstreets of {city}, front row {hipsterword}.", + "from {country} to {city}, chasing {hipsterword}.", + "tiny 
studio in {city}, big {hipsterword} dreams.", + + // meta / attitude + "ironically {hipsterword}, sincerely {adjective}.", + "intentionally {hipsterword}.", + "unapologetically {hipsterword}.", + "quietly {hipsterword} since {number:2001,2020}.", + "probably too {hipsterword} for this.", + "you probably haven’t tried this {hipsterword}.", + "subtly {hipsterword}, never loud.", + + // maker / work + "built with {hipsterword} and {hobby}.", + "shipping {hipsterword} from {city}.", + "{programminglanguage} by day, {hipsterword} by night.", + "{company} meets {hipsterword} ethos.", + "{productcategory} with {hipsterword} edges.", + "designing around {hipsterword} constraints.", + + // music / film / culture + "{songgenre} loops with {hipsterword} undertones.", + "cinema nights, strictly {hipsterword}.", + "{hipsterword} soundtrack on repeat.", + "scored in {hipsterword}, mixed {adverb}.", + "director’s cut: more {hipsterword}.", + + // social / shareable + "tag it {hipsterword}, keep it {adjective}.", + "overheard in {city}: very {hipsterword}.", + "sent from a {hipsterword} corner of {city}.", + "friends don’t let friends skip {hipsterword}.", + "if you know, it’s {hipsterword}.", + }, } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/internet.go b/vendor/github.com/brianvoe/gofakeit/v7/data/internet.go index ae7561af..d546d6d4 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/internet.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/internet.go @@ -8,4 +8,5 @@ var Internet = map[string][]string{ "http_version": {"HTTP/1.0", "HTTP/1.1", "HTTP/2.0"}, "http_status_simple": {"200", "301", "302", "400", "404", "500"}, "http_status_general": {"100", "200", "201", "203", "204", "205", "301", "302", "304", "400", "401", "403", "404", "405", "406", "416", "500", "501", "502", "503", "504"}, + "api": {"curl/#.#.#", "python-requests/#.#.#", "PostmanRuntime/#.#.#", "Go-http-client/#.#", "Java/#.#.#_###", "node-fetch/#.#.#", "okhttp/#.#.#", "axios/#.#.#", 
"rest-client/#.#.#", "httpie/#.#.#", "wget/#.#.#", "apache-httpclient/#.#.##", "ruby/#.#.#", "php/#.#.#", "swift/#.#.#", "dotnet/#.#.#", "insomnia/####.#.#", "httpie-go/#.#.#", "httpx/#.##.#", "urllib3/#.##.##"}, } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/isbn.go b/vendor/github.com/brianvoe/gofakeit/v7/data/isbn.go new file mode 100644 index 00000000..ba9fd3ee --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/isbn.go @@ -0,0 +1,58 @@ +package data + +// Prefixes for ISBN standards +const ( + ISBN13Prefix = "978" + ISBN10Prefix = "979" +) + +// ISBNRule defines a registrant rule range and its length +type ISBNRule struct { + Min string + Max string + Length int +} + +// ISBNRules maps prefix -> registration group -> registrant rules +var ISBNRules = map[string]map[string][]ISBNRule{ + ISBN13Prefix: { + "0": { + {Min: "0000000", Max: "1999999", Length: 2}, + {Min: "2000000", Max: "2279999", Length: 3}, + {Min: "2280000", Max: "2289999", Length: 4}, + {Min: "2290000", Max: "6479999", Length: 3}, + {Min: "6480000", Max: "6489999", Length: 7}, + {Min: "6490000", Max: "6999999", Length: 3}, + {Min: "7000000", Max: "8499999", Length: 4}, + {Min: "8500000", Max: "8999999", Length: 5}, + {Min: "9000000", Max: "9499999", Length: 6}, + {Min: "9500000", Max: "9999999", Length: 7}, + }, + "1": { + {Min: "0000000", Max: "0999999", Length: 2}, + {Min: "1000000", Max: "3999999", Length: 3}, + {Min: "4000000", Max: "5499999", Length: 4}, + {Min: "5500000", Max: "7319999", Length: 5}, + {Min: "7320000", Max: "7399999", Length: 7}, + {Min: "7400000", Max: "8697999", Length: 5}, + {Min: "8698000", Max: "9729999", Length: 6}, + {Min: "9730000", Max: "9877999", Length: 4}, + {Min: "9878000", Max: "9989999", Length: 6}, + {Min: "9990000", Max: "9999999", Length: 7}, + }, + }, + ISBN10Prefix: { + "8": { + {Min: "0000000", Max: "1999999", Length: 2}, + {Min: "2000000", Max: "2279999", Length: 3}, + {Min: "2280000", Max: "2289999", Length: 4}, + {Min: 
"2290000", Max: "6479999", Length: 3}, + {Min: "6480000", Max: "6489999", Length: 7}, + {Min: "6490000", Max: "6999999", Length: 3}, + {Min: "7000000", Max: "8499999", Length: 4}, + {Min: "8500000", Max: "8999999", Length: 5}, + {Min: "9000000", Max: "9499999", Length: 6}, + {Min: "9500000", Max: "9999999", Length: 7}, + }, + }, +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/job.go b/vendor/github.com/brianvoe/gofakeit/v7/data/job.go index 905dd74e..726f1df2 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/job.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/job.go @@ -2,7 +2,66 @@ package data // Job consists of job data var Job = map[string][]string{ - "title": {"Administrator", "Agent", "Analyst", "Architect", "Assistant", "Associate", "Consultant", "Coordinator", "Designer", "Developer", "Director", "Engineer", "Executive", "Facilitator", "Liaison", "Manager", "Officer", "Orchestrator", "Planner", "Producer", "Representative", "Specialist", "Strategist", "Supervisor", "Technician"}, - "descriptor": {"Central", "Chief", "Corporate", "Customer", "Direct", "District", "Dynamic", "Dynamic", "Forward", "Future", "Global", "Human", "Internal", "International", "Investor", "Lead", "Legacy", "National", "Principal", "Product", "Regional", "Senior"}, - "level": {"Accountability", "Accounts", "Applications", "Assurance", "Brand", "Branding", "Communications", "Configuration", "Creative", "Data", "Directives", "Division", "Factors", "Functionality", "Group", "Identity", "Implementation", "Infrastructure", "Integration", "Interactions", "Intranet", "Marketing", "Markets", "Metrics", "Mobility", "Operations", "Optimization", "Paradigm", "Program", "Quality", "Research", "Response", "Security", "Solutions", "Tactics", "Usability", "Web"}, + "title": { + "Accountant", "Actor", "Acrobat", "Administrative Assistant", "Administrator", + "Animator", "Analyst", "Anesthesiologist", "Archaeologist", "Architect", + "Artist", "Astronomer", "Athletic Trainer", 
"Attorney", "Auditor", + "Baker", "Banker", "Barber", "Bartender", "Barista", + "Bellhop", "Biologist", "Blacksmith", "Bookkeeper", "Bouncer", + "Broker", "Bus Driver", "Butcher", "Camera Operator", "Carpenter", + "Cashier", "Chef", "Chemist", "Cleaner", "Coach", + "Comedian", "Computer Programmer", "Concierge", "Construction Worker", "Consultant", + "Contractor", "Cook", "Counselor", "Court Reporter", "Curator", + "Custodian", "Dancer", "Data Scientist", "Delivery Driver", "Dentist", + "Designer", "Detective", "Developer", "Dietitian", "Director", + "DJ", "Doctor", "Dog Walker", "Driver", "Electrician", + "Engineer", "Esthetician", "Executive", "Farmer", "Financial Advisor", + "Financial Planner", "Firefighter", "Fisher", "Flight Attendant", "Florist", + "Garbage Collector", "Gardener", "Geologist", "Graphic Designer", "Groundskeeper", + "Hairdresser", "Handyman", "Hostess", "Hotel Manager", "HVAC Technician", + "Illustrator", "Insurance Agent", "Interpreter", "Janitor", "Journalist", + "Judge", "Landscaper", "Lawyer", "Legal Assistant", "Librarian", + "Lifeguard", "Line Cook", "Loan Officer", "Mail Carrier", "Manager", + "Mason", "Massage Therapist", "Mechanic", "Musician", "Nail Technician", + "Network Administrator", "Nurse", "Nutritionist", "Occupational Therapist", "Optometrist", + "Painter", "Paralegal", "Paramedic", "Pharmacist", "Photographer", + "Physical Therapist", "Physician", "Pilot", "Plumber", "Police Officer", + "Principal", "Professor", "Programmer", "Psychologist", "Radiologist", + "Real Estate Agent", "Receptionist", "Referee", "Reporter", "Researcher", + "Retail Associate", "Roofer", "Sailor", "Sales Representative", "Scientist", + "Scrum Master", "Sculptor", "Secretary", "Security Guard", "Server", + "Singer", "Social Media Influencer", "Social Worker", "Software Engineer", "Sommelier", + "Sound Engineer", "Speech Therapist", "Stage Manager", "Statistician", "Steamer", + "Stock Clerk", "Store Manager", "Storyteller", "Stunt Performer", 
"Stylist", + "Substitute Teacher", "Surgeon", "Surveyor", "System Administrator", "Systems Analyst", + "Tailor", "Tattoo Artist", "Tax Preparer", "Taxi Driver", "Teacher", + "Technician", "Telemarketer", "Therapist", "Tiler", "Tour Guide", + "Toy Maker", "Traffic Controller", "Train Conductor", "Translator", "Travel Agent", + "Tutor", "Umpire", "Upholsterer", "Usher", "Valet", + "Vendor", "Veterinarian", "Video Editor", "Voice Actor", "Waiter", + "Watchmaker", "Web Developer", "Welder", "Window Washer", "Writer", + "Zookeeper", + }, + "descriptor": { + "Assistant", "Associate", "Brand", "Business", "Central", + "Chief", "Client", "Corporate", "Customer", "Deputy", + "Direct", "District", "Division", "Dynamic", "Executive", + "Finance", "Financial", "Forward", "Future", "Global", + "Group", "Head", "Human", "Internal", "International", + "Investor", "Junior", "Lead", "Legacy", "Main", + "Marketing", "National", "Operations", "Primary", "Principal", + "Product", "Public", "Regional", "Sales", "Senior", + "Staff", "Strategic", "Systems", "Technical", "Technology", + "Vice", + }, + "level": { + "Accountability", "Accounts", "Applications", "Assurance", "Brand", + "Branding", "Communications", "Configuration", "Creative", "Data", + "Directives", "Division", "Factors", "Functionality", "Group", + "Identity", "Implementation", "Infrastructure", "Integration", "Interactions", + "Intranet", "Marketing", "Markets", "Metrics", "Mobility", + "Operations", "Optimization", "Paradigm", "Program", "Quality", + "Research", "Response", "Security", "Solutions", "Tactics", + "Usability", "Web", + }, } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/person.go b/vendor/github.com/brianvoe/gofakeit/v7/data/person.go index 8f65a16b..2eb101e8 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/person.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/person.go @@ -2,11 +2,664 @@ package data // Person consists of a slice of people information var Person = 
map[string][]string{ - "prefix": {"Mr.", "Mrs.", "Ms.", "Miss", "Dr."}, - "suffix": {"Jr.", "Sr.", "I", "II", "III", "IV", "V", "MD", "DDS", "PhD", "DVM"}, - "first": {"Aaliyah", "Aaron", "Abagail", "Abbey", "Abbie", "Abbigail", "Abby", "Abdiel", "Abdul", "Abdullah", "Abe", "Abel", "Abelardo", "Abigail", "Abigale", "Abigayle", "Abner", "Abraham", "Ada", "Adah", "Adalberto", "Adaline", "Adam", "Adan", "Addie", "Addison", "Adela", "Adelbert", "Adele", "Adelia", "Adeline", "Adell", "Adella", "Adelle", "Aditya", "Adolf", "Adolfo", "Adolph", "Adolphus", "Adonis", "Adrain", "Adrian", "Adriana", "Adrianna", "Adriel", "Adrien", "Adrienne", "Afton", "Aglae", "Agnes", "Agustin", "Agustina", "Ahmad", "Ahmed", "Aida", "Aidan", "Aiden", "Aileen", "Aimee", "Aisha", "Aiyana", "Akeem", "Al", "Alaina", "Alan", "Alana", "Alanis", "Alanna", "Alayna", "Alba", "Albert", "Alberta", "Albertha", "Alberto", "Albin", "Albina", "Alda", "Alden", "Alec", "Aleen", "Alejandra", "Alejandrin", "Alek", "Alena", "Alene", "Alessandra", "Alessandro", "Alessia", "Aletha", "Alex", "Alexa", "Alexander", "Alexandra", "Alexandre", "Alexandrea", "Alexandria", "Alexandrine", "Alexandro", "Alexane", "Alexanne", "Alexie", "Alexis", "Alexys", "Alexzander", "Alf", "Alfonso", "Alfonzo", "Alford", "Alfred", "Alfreda", "Alfredo", "Ali", "Alia", "Alice", "Alicia", "Alisa", "Alisha", "Alison", "Alivia", "Aliya", "Aliyah", "Aliza", "Alize", "Allan", "Allen", "Allene", "Allie", "Allison", "Ally", "Alphonso", "Alta", "Althea", "Alva", "Alvah", "Alvena", "Alvera", "Alverta", "Alvina", "Alvis", "Alyce", "Alycia", "Alysa", "Alysha", "Alyson", "Alysson", "Amalia", "Amanda", "Amani", "Amara", "Amari", "Amaya", "Amber", "Ambrose", "Amelia", "Amelie", "Amely", "America", "Americo", "Amie", "Amina", "Amir", "Amira", "Amiya", "Amos", "Amparo", "Amy", "Amya", "Ana", "Anabel", "Anabelle", "Anahi", "Anais", "Anastacio", "Anastasia", "Anderson", "Andre", "Andreane", "Andreanne", "Andres", "Andrew", "Andy", "Angel", "Angela", 
"Angelica", "Angelina", "Angeline", "Angelita", "Angelo", "Angie", "Angus", "Anibal", "Anika", "Anissa", "Anita", "Aniya", "Aniyah", "Anjali", "Anna", "Annabel", "Annabell", "Annabelle", "Annalise", "Annamae", "Annamarie", "Anne", "Annetta", "Annette", "Annie", "Ansel", "Ansley", "Anthony", "Antoinette", "Antone", "Antonetta", "Antonette", "Antonia", "Antonietta", "Antonina", "Antonio", "Antwan", "Antwon", "Anya", "April", "Ara", "Araceli", "Aracely", "Arch", "Archibald", "Ardella", "Arden", "Ardith", "Arely", "Ari", "Ariane", "Arianna", "Aric", "Ariel", "Arielle", "Arjun", "Arlene", "Arlie", "Arlo", "Armand", "Armando", "Armani", "Arnaldo", "Arne", "Arno", "Arnold", "Arnoldo", "Arnulfo", "Aron", "Art", "Arthur", "Arturo", "Arvel", "Arvid", "Arvilla", "Aryanna", "Asa", "Asha", "Ashlee", "Ashleigh", "Ashley", "Ashly", "Ashlynn", "Ashton", "Ashtyn", "Asia", "Assunta", "Astrid", "Athena", "Aubree", "Aubrey", "Audie", "Audra", "Audreanne", "Audrey", "August", "Augusta", "Augustine", "Augustus", "Aurelia", "Aurelie", "Aurelio", "Aurore", "Austen", "Austin", "Austyn", "Autumn", "Ava", "Avery", "Avis", "Axel", "Ayana", "Ayden", "Ayla", "Aylin", "Baby", "Bailee", "Bailey", "Barbara", "Barney", "Baron", "Barrett", "Barry", "Bart", "Bartholome", "Barton", "Baylee", "Beatrice", "Beau", "Beaulah", "Bell", "Bella", "Belle", "Ben", "Benedict", "Benjamin", "Bennett", "Bennie", "Benny", "Benton", "Berenice", "Bernadette", "Bernadine", "Bernard", "Bernardo", "Berneice", "Bernhard", "Bernice", "Bernie", "Berniece", "Bernita", "Berry", "Bert", "Berta", "Bertha", "Bertram", "Bertrand", "Beryl", "Bessie", "Beth", "Bethany", "Bethel", "Betsy", "Bette", "Bettie", "Betty", "Bettye", "Beulah", "Beverly", "Bianka", "Bill", "Billie", "Billy", "Birdie", "Blair", "Blaise", "Blake", "Blanca", "Blanche", "Blaze", "Bo", "Bobbie", "Bobby", "Bonita", "Bonnie", "Boris", "Boyd", "Brad", "Braden", "Bradford", "Bradley", "Bradly", "Brady", "Braeden", "Brain", "Brandi", "Brando", "Brandon", "Brandt", 
"Brandy", "Brandyn", "Brannon", "Branson", "Brant", "Braulio", "Braxton", "Brayan", "Breana", "Breanna", "Breanne", "Brenda", "Brendan", "Brenden", "Brendon", "Brenna", "Brennan", "Brennon", "Brent", "Bret", "Brett", "Bria", "Brian", "Briana", "Brianne", "Brice", "Bridget", "Bridgette", "Bridie", "Brielle", "Brigitte", "Brionna", "Brisa", "Britney", "Brittany", "Brock", "Broderick", "Brody", "Brook", "Brooke", "Brooklyn", "Brooks", "Brown", "Bruce", "Bryana", "Bryce", "Brycen", "Bryon", "Buck", "Bud", "Buddy", "Buford", "Bulah", "Burdette", "Burley", "Burnice", "Buster", "Cade", "Caden", "Caesar", "Caitlyn", "Cale", "Caleb", "Caleigh", "Cali", "Calista", "Callie", "Camden", "Cameron", "Camila", "Camilla", "Camille", "Camren", "Camron", "Camryn", "Camylle", "Candace", "Candelario", "Candice", "Candida", "Candido", "Cara", "Carey", "Carissa", "Carlee", "Carleton", "Carley", "Carli", "Carlie", "Carlo", "Carlos", "Carlotta", "Carmel", "Carmela", "Carmella", "Carmelo", "Carmen", "Carmine", "Carol", "Carolanne", "Carole", "Carolina", "Caroline", "Carolyn", "Carolyne", "Carrie", "Carroll", "Carson", "Carter", "Cary", "Casandra", "Casey", "Casimer", "Casimir", "Casper", "Cassandra", "Cassandre", "Cassidy", "Cassie", "Catalina", "Caterina", "Catharine", "Catherine", "Cathrine", "Cathryn", "Cathy", "Cayla", "Ceasar", "Cecelia", "Cecil", "Cecile", "Cecilia", "Cedrick", "Celestine", "Celestino", "Celia", "Celine", "Cesar", "Chad", "Chadd", "Chadrick", "Chaim", "Chance", "Chandler", "Chanel", "Chanelle", "Charity", "Charlene", "Charles", "Charley", "Charlie", "Charlotte", "Chase", "Chasity", "Chauncey", "Chaya", "Chaz", "Chelsea", "Chelsey", "Chelsie", "Chesley", "Chester", "Chet", "Cheyanne", "Cheyenne", "Chloe", "Chris", "Christ", "Christa", "Christelle", "Christian", "Christiana", "Christina", "Christine", "Christop", "Christophe", "Christopher", "Christy", "Chyna", "Ciara", "Cicero", "Cielo", "Cierra", "Cindy", "Citlalli", "Clair", "Claire", "Clara", "Clarabelle", "Clare", 
"Clarissa", "Clark", "Claud", "Claude", "Claudia", "Claudie", "Claudine", "Clay", "Clemens", "Clement", "Clementina", "Clementine", "Clemmie", "Cleo", "Cleora", "Cleta", "Cletus", "Cleve", "Cleveland", "Clifford", "Clifton", "Clint", "Clinton", "Clotilde", "Clovis", "Cloyd", "Clyde", "Coby", "Cody", "Colby", "Cole", "Coleman", "Colin", "Colleen", "Collin", "Colt", "Colten", "Colton", "Columbus", "Concepcion", "Conner", "Connie", "Connor", "Conor", "Conrad", "Constance", "Constantin", "Consuelo", "Cooper", "Cora", "Coralie", "Corbin", "Cordelia", "Cordell", "Cordia", "Cordie", "Corene", "Corine", "Cornelius", "Cornell", "Corrine", "Cortez", "Cortney", "Cory", "Coty", "Courtney", "Coy", "Craig", "Crawford", "Creola", "Cristal", "Cristian", "Cristina", "Cristobal", "Cristopher", "Cruz", "Crystal", "Crystel", "Cullen", "Curt", "Curtis", "Cydney", "Cynthia", "Cyril", "Cyrus", "Dagmar", "Dahlia", "Daija", "Daisha", "Daisy", "Dakota", "Dale", "Dallas", "Dallin", "Dalton", "Damaris", "Dameon", "Damian", "Damien", "Damion", "Damon", "Dan", "Dana", "Dandre", "Dane", "Dangelo", "Dangelo", "Danial", "Daniela", "Daniella", "Danielle", "Danika", "Dannie", "Danny", "Dante", "Danyka", "Daphne", "Daphnee", "Daphney", "Darby", "Daren", "Darian", "Dariana", "Darien", "Dario", "Darion", "Darius", "Darlene", "Daron", "Darrel", "Darrell", "Darren", "Darrick", "Darrin", "Darrion", "Darron", "Darryl", "Darwin", "Daryl", "Dashawn", "Dasia", "Dave", "David", "Davin", "Davion", "Davon", "Davonte", "Dawn", "Dawson", "Dax", "Dayana", "Dayna", "Dayne", "Dayton", "Dean", "Deangelo", "Deanna", "Deborah", "Declan", "Dedric", "Dedrick", "Dee", "Deion", "Deja", "Dejah", "Dejon", "Dejuan", "Delaney", "Delbert", "Delfina", "Delia", "Delilah", "Dell", "Della", "Delmer", "Delores", "Delpha", "Delphia", "Delphine", "Delta", "Demarco", "Demarcus", "Demario", "Demetris", "Demetrius", "Demond", "Dena", "Denis", "Dennis", "Deon", "Deondre", "Deontae", "Deonte", "Dereck", "Derek", "Derick", "Deron", 
"Derrick", "Deshaun", "Deshawn", "Desiree", "Desmond", "Dessie", "Destany", "Destin", "Destinee", "Destiney", "Destini", "Destiny", "Devan", "Devante", "Deven", "Devin", "Devon", "Devonte", "Devyn", "Dewayne", "Dewitt", "Dexter", "Diamond", "Diana", "Dianna", "Diego", "Dillan", "Dillon", "Dimitri", "Dina", "Dino", "Dion", "Dixie", "Dock", "Dolly", "Dolores", "Domenic", "Domenica", "Domenick", "Domenico", "Domingo", "Dominic", "Dominique", "Don", "Donald", "Donato", "Donavon", "Donna", "Donnell", "Donnie", "Donny", "Dora", "Dorcas", "Dorian", "Doris", "Dorothea", "Dorothy", "Dorris", "Dortha", "Dorthy", "Doug", "Douglas", "Dovie", "Doyle", "Drake", "Drew", "Duane", "Dudley", "Dulce", "Duncan", "Durward", "Dustin", "Dusty", "Dwight", "Dylan", "Earl", "Earlene", "Earline", "Earnest", "Earnestine", "Easter", "Easton", "Ebba", "Ebony", "Ed", "Eda", "Edd", "Eddie", "Eden", "Edgar", "Edgardo", "Edison", "Edmond", "Edmund", "Edna", "Eduardo", "Edward", "Edwardo", "Edwin", "Edwina", "Edyth", "Edythe", "Effie", "Efrain", "Efren", "Eileen", "Einar", "Eino", "Eladio", "Elaina", "Elbert", "Elda", "Eldon", "Eldora", "Eldred", "Eldridge", "Eleanora", "Eleanore", "Eleazar", "Electa", "Elena", "Elenor", "Elenora", "Eleonore", "Elfrieda", "Eli", "Elian", "Eliane", "Elias", "Eliezer", "Elijah", "Elinor", "Elinore", "Elisa", "Elisabeth", "Elise", "Eliseo", "Elisha", "Elissa", "Eliza", "Elizabeth", "Ella", "Ellen", "Ellie", "Elliot", "Elliott", "Ellis", "Ellsworth", "Elmer", "Elmira", "Elmo", "Elmore", "Elna", "Elnora", "Elody", "Eloisa", "Eloise", "Elouise", "Eloy", "Elroy", "Elsa", "Else", "Elsie", "Elta", "Elton", "Elva", "Elvera", "Elvie", "Elvis", "Elwin", "Elwyn", "Elyse", "Elyssa", "Elza", "Emanuel", "Emelia", "Emelie", "Emely", "Emerald", "Emerson", "Emery", "Emie", "Emil", "Emile", "Emilia", "Emiliano", "Emilie", "Emilio", "Emily", "Emma", "Emmalee", "Emmanuel", "Emmanuelle", "Emmet", "Emmett", "Emmie", "Emmitt", "Emmy", "Emory", "Ena", "Enid", "Enoch", "Enola", "Enos", 
"Enrico", "Enrique", "Ephraim", "Era", "Eriberto", "Eric", "Erica", "Erich", "Erick", "Ericka", "Erik", "Erika", "Erin", "Erling", "Erna", "Ernest", "Ernestina", "Ernestine", "Ernesto", "Ernie", "Ervin", "Erwin", "Eryn", "Esmeralda", "Esperanza", "Esta", "Esteban", "Estefania", "Estel", "Estell", "Estella", "Estelle", "Estevan", "Esther", "Estrella", "Etha", "Ethan", "Ethel", "Ethelyn", "Ethyl", "Ettie", "Eudora", "Eugene", "Eugenia", "Eula", "Eulah", "Eulalia", "Euna", "Eunice", "Eusebio", "Eva", "Evalyn", "Evan", "Evangeline", "Evans", "Eve", "Eveline", "Evelyn", "Everardo", "Everett", "Everette", "Evert", "Evie", "Ewald", "Ewell", "Ezekiel", "Ezequiel", "Ezra", "Fabian", "Fabiola", "Fae", "Fannie", "Fanny", "Fatima", "Faustino", "Fausto", "Favian", "Fay", "Faye", "Federico", "Felicia", "Felicita", "Felicity", "Felipa", "Felipe", "Felix", "Felton", "Fermin", "Fern", "Fernando", "Ferne", "Fidel", "Filiberto", "Filomena", "Finn", "Fiona", "Flavie", "Flavio", "Fleta", "Fletcher", "Flo", "Florence", "Florencio", "Florian", "Florida", "Florine", "Flossie", "Floy", "Floyd", "Ford", "Forest", "Forrest", "Foster", "Frances", "Francesca", "Francesco", "Francis", "Francisca", "Francisco", "Franco", "Frank", "Frankie", "Franz", "Fred", "Freda", "Freddie", "Freddy", "Frederic", "Frederick", "Frederik", "Frederique", "Fredrick", "Fredy", "Freeda", "Freeman", "Freida", "Frida", "Frieda", "Friedrich", "Fritz", "Furman", "Gabe", "Gabriel", "Gabriella", "Gabrielle", "Gaetano", "Gage", "Gail", "Gardner", "Garett", "Garfield", "Garland", "Garnet", "Garnett", "Garret", "Garrett", "Garrick", "Garrison", "Garry", "Garth", "Gaston", "Gavin", "Gay", "Gayle", "Gaylord", "Gene", "General", "Genesis", "Genevieve", "Gennaro", "Genoveva", "Geo", "Geoffrey", "George", "Georgette", "Georgiana", "Georgianna", "Geovanni", "Geovanny", "Geovany", "Gerald", "Geraldine", "Gerard", "Gerardo", "Gerda", "Gerhard", "Germaine", "German", "Gerry", "Gerson", "Gertrude", "Gia", "Gianni", "Gideon", 
"Gilbert", "Gilberto", "Gilda", "Giles", "Gillian", "Gina", "Gino", "Giovani", "Giovanna", "Giovanni", "Giovanny", "Gisselle", "Giuseppe", "Gladyce", "Gladys", "Glen", "Glenda", "Glenna", "Glennie", "Gloria", "Godfrey", "Golda", "Golden", "Gonzalo", "Gordon", "Grace", "Gracie", "Graciela", "Grady", "Graham", "Grant", "Granville", "Grayce", "Grayson", "Green", "Greg", "Gregg", "Gregoria", "Gregorio", "Gregory", "Greta", "Gretchen", "Greyson", "Griffin", "Grover", "Guadalupe", "Gudrun", "Guido", "Guillermo", "Guiseppe", "Gunnar", "Gunner", "Gus", "Gussie", "Gust", "Gustave", "Guy", "Gwen", "Gwendolyn", "Hadley", "Hailee", "Hailey", "Hailie", "Hal", "Haleigh", "Haley", "Halie", "Halle", "Hallie", "Hank", "Hanna", "Hannah", "Hans", "Hardy", "Harley", "Harmon", "Harmony", "Harold", "Harrison", "Harry", "Harvey", "Haskell", "Hassan", "Hassie", "Hattie", "Haven", "Hayden", "Haylee", "Hayley", "Haylie", "Hazel", "Hazle", "Heath", "Heather", "Heaven", "Heber", "Hector", "Heidi", "Helen", "Helena", "Helene", "Helga", "Hellen", "Helmer", "Heloise", "Henderson", "Henri", "Henriette", "Henry", "Herbert", "Herman", "Hermann", "Hermina", "Herminia", "Herminio", "Hershel", "Herta", "Hertha", "Hester", "Hettie", "Hilario", "Hilbert", "Hilda", "Hildegard", "Hillard", "Hillary", "Hilma", "Hilton", "Hipolito", "Hiram", "Hobart", "Holden", "Hollie", "Hollis", "Holly", "Hope", "Horace", "Horacio", "Hortense", "Hosea", "Houston", "Howard", "Howell", "Hoyt", "Hubert", "Hudson", "Hugh", "Hulda", "Humberto", "Hunter", "Hyman", "Ian", "Ibrahim", "Icie", "Ida", "Idell", "Idella", "Ignacio", "Ignatius", "Ike", "Ila", "Ilene", "Iliana", "Ima", "Imani", "Imelda", "Immanuel", "Imogene", "Ines", "Irma", "Irving", "Irwin", "Isaac", "Isabel", "Isabell", "Isabella", "Isabelle", "Isac", "Isadore", "Isai", "Isaiah", "Isaias", "Isidro", "Ismael", "Isobel", "Isom", "Israel", "Issac", "Itzel", "Iva", "Ivah", "Ivory", "Ivy", "Izabella", "Izaiah", "Jabari", "Jace", "Jacey", "Jacinthe", "Jacinto", "Jack", 
"Jackeline", "Jackie", "Jacklyn", "Jackson", "Jacky", "Jaclyn", "Jacquelyn", "Jacques", "Jacynthe", "Jada", "Jade", "Jaden", "Jadon", "Jadyn", "Jaeden", "Jaida", "Jaiden", "Jailyn", "Jaime", "Jairo", "Jakayla", "Jake", "Jakob", "Jaleel", "Jalen", "Jalon", "Jalyn", "Jamaal", "Jamal", "Jamar", "Jamarcus", "Jamel", "Jameson", "Jamey", "Jamie", "Jamil", "Jamir", "Jamison", "Jammie", "Jan", "Jana", "Janae", "Jane", "Janelle", "Janessa", "Janet", "Janice", "Janick", "Janie", "Janis", "Janiya", "Jannie", "Jany", "Jaquan", "Jaquelin", "Jaqueline", "Jared", "Jaren", "Jarod", "Jaron", "Jarred", "Jarrell", "Jarret", "Jarrett", "Jarrod", "Jarvis", "Jasen", "Jasmin", "Jason", "Jasper", "Jaunita", "Javier", "Javon", "Javonte", "Jay", "Jayce", "Jaycee", "Jayda", "Jayde", "Jayden", "Jaydon", "Jaylan", "Jaylen", "Jaylin", "Jaylon", "Jayme", "Jayne", "Jayson", "Jazlyn", "Jazmin", "Jazmyn", "Jazmyne", "Jean", "Jeanette", "Jeanie", "Jeanne", "Jed", "Jedediah", "Jedidiah", "Jeff", "Jefferey", "Jeffery", "Jeffrey", "Jeffry", "Jena", "Jenifer", "Jennie", "Jennifer", "Jennings", "Jennyfer", "Jensen", "Jerad", "Jerald", "Jeramie", "Jeramy", "Jerel", "Jeremie", "Jeremy", "Jermain", "Jermaine", "Jermey", "Jerod", "Jerome", "Jeromy", "Jerrell", "Jerrod", "Jerrold", "Jerry", "Jess", "Jesse", "Jessica", "Jessie", "Jessika", "Jessy", "Jessyca", "Jesus", "Jett", "Jettie", "Jevon", "Jewel", "Jewell", "Jillian", "Jimmie", "Jimmy", "Jo", "Joan", "Joana", "Joanie", "Joanne", "Joannie", "Joanny", "Joany", "Joaquin", "Jocelyn", "Jodie", "Jody", "Joe", "Joel", "Joelle", "Joesph", "Joey", "Johan", "Johann", "Johanna", "Johathan", "John", "Johnathan", "Johnathon", "Johnnie", "Johnny", "Johnpaul", "Johnson", "Jolie", "Jon", "Jonas", "Jonatan", "Jonathan", "Jonathon", "Jordan", "Jordane", "Jordi", "Jordon", "Jordy", "Jordyn", "Jorge", "Jose", "Josefa", "Josefina", "Joseph", "Josephine", "Josh", "Joshua", "Joshuah", "Josiah", "Josiane", "Josianne", "Josie", "Josue", "Jovan", "Jovani", "Jovanny", "Jovany", 
"Joy", "Joyce", "Juana", "Juanita", "Judah", "Judd", "Jude", "Judge", "Judson", "Judy", "Jules", "Julia", "Julian", "Juliana", "Julianne", "Julie", "Julien", "Juliet", "Julio", "Julius", "June", "Junior", "Junius", "Justen", "Justice", "Justina", "Justine", "Juston", "Justus", "Justyn", "Juvenal", "Juwan", "Kacey", "Kaci", "Kacie", "Kade", "Kaden", "Kadin", "Kaela", "Kaelyn", "Kaia", "Kailee", "Kailey", "Kailyn", "Kaitlin", "Kaitlyn", "Kale", "Kaleb", "Kaleigh", "Kaley", "Kali", "Kallie", "Kameron", "Kamille", "Kamren", "Kamron", "Kamryn", "Kane", "Kara", "Kareem", "Karelle", "Karen", "Kari", "Kariane", "Karianne", "Karina", "Karine", "Karl", "Karlee", "Karley", "Karli", "Karlie", "Karolann", "Karson", "Kasandra", "Kasey", "Kassandra", "Katarina", "Katelin", "Katelyn", "Katelynn", "Katharina", "Katherine", "Katheryn", "Kathleen", "Kathlyn", "Kathryn", "Kathryne", "Katlyn", "Katlynn", "Katrina", "Katrine", "Kattie", "Kavon", "Kay", "Kaya", "Kaycee", "Kayden", "Kayla", "Kaylah", "Kaylee", "Kayleigh", "Kayley", "Kayli", "Kaylie", "Kaylin", "Keagan", "Keanu", "Keara", "Keaton", "Keegan", "Keeley", "Keely", "Keenan", "Keira", "Keith", "Kellen", "Kelley", "Kelli", "Kellie", "Kelly", "Kelsi", "Kelsie", "Kelton", "Kelvin", "Ken", "Kendall", "Kendra", "Kendrick", "Kenna", "Kennedi", "Kennedy", "Kenneth", "Kennith", "Kenny", "Kenton", "Kenya", "Kenyatta", "Kenyon", "Keon", "Keshaun", "Keshawn", "Keven", "Kevin", "Kevon", "Keyon", "Keyshawn", "Khalid", "Khalil", "Kian", "Kiana", "Kianna", "Kiara", "Kiarra", "Kiel", "Kiera", "Kieran", "Kiley", "Kim", "Kimberly", "King", "Kip", "Kira", "Kirk", "Kirsten", "Kirstin", "Kitty", "Kobe", "Koby", "Kody", "Kolby", "Kole", "Korbin", "Korey", "Kory", "Kraig", "Kris", "Krista", "Kristian", "Kristin", "Kristina", "Kristofer", "Kristoffer", "Kristopher", "Kristy", "Krystal", "Krystel", "Krystina", "Kurt", "Kurtis", "Kyla", "Kyle", "Kylee", "Kyleigh", "Kyler", "Kylie", "Kyra", "Lacey", "Lacy", "Ladarius", "Lafayette", "Laila", "Laisha", 
"Lamar", "Lambert", "Lamont", "Lance", "Landen", "Lane", "Laney", "Larissa", "Laron", "Larry", "Larue", "Laura", "Laurel", "Lauren", "Laurence", "Lauretta", "Lauriane", "Laurianne", "Laurie", "Laurine", "Laury", "Lauryn", "Lavada", "Lavern", "Laverna", "Laverne", "Lavina", "Lavinia", "Lavon", "Lavonne", "Lawrence", "Lawson", "Layla", "Layne", "Lazaro", "Lea", "Leann", "Leanna", "Leanne", "Leatha", "Leda", "Lee", "Leif", "Leila", "Leilani", "Lela", "Lelah", "Leland", "Lelia", "Lempi", "Lemuel", "Lenna", "Lennie", "Lenny", "Lenora", "Lenore", "Leo", "Leola", "Leon", "Leonard", "Leonardo", "Leone", "Leonel", "Leonie", "Leonor", "Leonora", "Leopold", "Leopoldo", "Leora", "Lera", "Lesley", "Leslie", "Lesly", "Lessie", "Lester", "Leta", "Letha", "Letitia", "Levi", "Lew", "Lewis", "Lexi", "Lexie", "Lexus", "Lia", "Liam", "Liana", "Libbie", "Libby", "Lila", "Lilian", "Liliana", "Liliane", "Lilla", "Lillian", "Lilliana", "Lillie", "Lilly", "Lily", "Lilyan", "Lina", "Lincoln", "Linda", "Lindsay", "Lindsey", "Linnea", "Linnie", "Linwood", "Lionel", "Lisa", "Lisandro", "Lisette", "Litzy", "Liza", "Lizeth", "Lizzie", "Llewellyn", "Lloyd", "Logan", "Lois", "Lola", "Lolita", "Loma", "Lon", "London", "Lonie", "Lonnie", "Lonny", "Lonzo", "Lora", "Loraine", "Loren", "Lorena", "Lorenz", "Lorenza", "Lorenzo", "Lori", "Lorine", "Lorna", "Lottie", "Lou", "Louie", "Louisa", "Lourdes", "Louvenia", "Lowell", "Loy", "Loyal", "Loyce", "Lucas", "Luciano", "Lucie", "Lucienne", "Lucile", "Lucinda", "Lucio", "Lucious", "Lucius", "Lucy", "Ludie", "Ludwig", "Lue", "Luella", "Luigi", "Luis", "Luisa", "Lukas", "Lula", "Lulu", "Luna", "Lupe", "Lura", "Lurline", "Luther", "Luz", "Lyda", "Lydia", "Lyla", "Lynn", "Lyric", "Lysanne", "Mabel", "Mabelle", "Mable", "Mac", "Macey", "Maci", "Macie", "Mack", "Mackenzie", "Macy", "Madaline", "Madalyn", "Maddison", "Madeline", "Madelyn", "Madelynn", "Madge", "Madie", "Madilyn", "Madisen", "Madison", "Madisyn", "Madonna", "Madyson", "Mae", "Maegan", "Maeve", 
"Mafalda", "Magali", "Magdalen", "Magdalena", "Maggie", "Magnolia", "Magnus", "Maia", "Maida", "Maiya", "Major", "Makayla", "Makenna", "Makenzie", "Malachi", "Malcolm", "Malika", "Malinda", "Mallie", "Mallory", "Malvina", "Mandy", "Manley", "Manuel", "Manuela", "Mara", "Marc", "Marcel", "Marcelina", "Marcelino", "Marcella", "Marcelle", "Marcellus", "Marcelo", "Marcia", "Marco", "Marcos", "Marcus", "Margaret", "Margarete", "Margarett", "Margaretta", "Margarette", "Margarita", "Marge", "Margie", "Margot", "Margret", "Marguerite", "Maria", "Mariah", "Mariam", "Marian", "Mariana", "Mariane", "Marianna", "Marianne", "Mariano", "Maribel", "Marie", "Mariela", "Marielle", "Marietta", "Marilie", "Marilou", "Marilyne", "Marina", "Mario", "Marion", "Marisa", "Marisol", "Maritza", "Marjolaine", "Marjorie", "Marjory", "Mark", "Markus", "Marlee", "Marlen", "Marlene", "Marley", "Marlin", "Marlon", "Marques", "Marquis", "Marquise", "Marshall", "Marta", "Martin", "Martina", "Martine", "Marty", "Marvin", "Mary", "Maryam", "Maryjane", "Maryse", "Mason", "Mateo", "Mathew", "Mathias", "Mathilde", "Matilda", "Matilde", "Matt", "Matteo", "Mattie", "Maud", "Maude", "Maudie", "Maureen", "Maurice", "Mauricio", "Maurine", "Maverick", "Mavis", "Max", "Maxie", "Maxime", "Maximilian", "Maximillia", "Maximillian", "Maximo", "Maximus", "Maxine", "Maxwell", "May", "Maya", "Maybell", "Maybelle", "Maye", "Maymie", "Maynard", "Mayra", "Mazie", "Mckayla", "Mckenna", "Mckenzie", "Meagan", "Meaghan", "Meda", "Megane", "Meggie", "Meghan", "Mekhi", "Melany", "Melba", "Melisa", "Melissa", "Mellie", "Melody", "Melvin", "Melvina", "Melyna", "Melyssa", "Mercedes", "Meredith", "Merl", "Merle", "Merlin", "Merritt", "Mertie", "Mervin", "Meta", "Mia", "Micaela", "Micah", "Michael", "Michaela", "Michale", "Micheal", "Michel", "Michele", "Michelle", "Miguel", "Mikayla", "Mike", "Mikel", "Milan", "Miles", "Milford", "Miller", "Millie", "Milo", "Milton", "Mina", "Minerva", "Minnie", "Miracle", "Mireille", "Mireya", 
"Misael", "Missouri", "Misty", "Mitchel", "Mitchell", "Mittie", "Modesta", "Modesto", "Mohamed", "Mohammad", "Mohammed", "Moises", "Mollie", "Molly", "Mona", "Monica", "Monique", "Monroe", "Monserrat", "Monserrate", "Montana", "Monte", "Monty", "Morgan", "Moriah", "Morris", "Mortimer", "Morton", "Mose", "Moses", "Moshe", "Mossie", "Mozell", "Mozelle", "Muhammad", "Muriel", "Murl", "Murphy", "Murray", "Mustafa", "Mya", "Myah", "Mylene", "Myles", "Myra", "Myriam", "Myrl", "Myrna", "Myron", "Myrtice", "Myrtie", "Myrtis", "Myrtle", "Nadia", "Nakia", "Name", "Nannie", "Naomi", "Naomie", "Napoleon", "Narciso", "Nash", "Nasir", "Nat", "Natalia", "Natalie", "Natasha", "Nathan", "Nathanael", "Nathanial", "Nathaniel", "Nathen", "Nayeli", "Neal", "Ned", "Nedra", "Neha", "Neil", "Nelda", "Nella", "Nelle", "Nellie", "Nels", "Nelson", "Neoma", "Nestor", "Nettie", "Neva", "Newell", "Newton", "Nia", "Nicholas", "Nicholaus", "Nichole", "Nick", "Nicklaus", "Nickolas", "Nico", "Nicola", "Nicolas", "Nicole", "Nicolette", "Nigel", "Nikita", "Nikki", "Nikko", "Niko", "Nikolas", "Nils", "Nina", "Noah", "Noble", "Noe", "Noel", "Noelia", "Noemi", "Noemie", "Noemy", "Nola", "Nolan", "Nona", "Nora", "Norbert", "Norberto", "Norene", "Norma", "Norris", "Norval", "Norwood", "Nova", "Novella", "Nya", "Nyah", "Nyasia", "Obie", "Oceane", "Ocie", "Octavia", "Oda", "Odell", "Odessa", "Odie", "Ofelia", "Okey", "Ola", "Olaf", "Ole", "Olen", "Oleta", "Olga", "Olin", "Oliver", "Ollie", "Oma", "Omari", "Omer", "Ona", "Onie", "Opal", "Ophelia", "Ora", "Oral", "Oran", "Oren", "Orie", "Orin", "Orion", "Orland", "Orlando", "Orlo", "Orpha", "Orrin", "Orval", "Orville", "Osbaldo", "Osborne", "Oscar", "Osvaldo", "Oswald", "Oswaldo", "Otha", "Otho", "Otilia", "Otis", "Ottilie", "Ottis", "Otto", "Ova", "Owen", "Ozella", "Pablo", "Paige", "Palma", "Pamela", "Pansy", "Paolo", "Paris", "Parker", "Pascale", "Pasquale", "Pat", "Patience", "Patricia", "Patrick", "Patsy", "Pattie", "Paul", "Paula", "Pauline", "Paxton", 
"Payton", "Pearl", "Pearlie", "Pearline", "Pedro", "Peggie", "Penelope", "Percival", "Percy", "Perry", "Pete", "Peter", "Petra", "Peyton", "Philip", "Phoebe", "Phyllis", "Pierce", "Pierre", "Pietro", "Pink", "Pinkie", "Piper", "Polly", "Porter", "Precious", "Presley", "Preston", "Price", "Prince", "Princess", "Priscilla", "Providenci", "Prudence", "Queen", "Queenie", "Quentin", "Quincy", "Quinn", "Quinten", "Quinton", "Rachael", "Rachel", "Rachelle", "Rae", "Raegan", "Rafael", "Rafaela", "Raheem", "Rahsaan", "Rahul", "Raina", "Raleigh", "Ralph", "Ramiro", "Ramon", "Ramona", "Randal", "Randall", "Randi", "Randy", "Ransom", "Raoul", "Raphael", "Raphaelle", "Raquel", "Rashad", "Rashawn", "Rasheed", "Raul", "Raven", "Ray", "Raymond", "Raymundo", "Reagan", "Reanna", "Reba", "Rebeca", "Rebecca", "Rebeka", "Rebekah", "Reece", "Reed", "Reese", "Regan", "Reggie", "Reginald", "Reid", "Reilly", "Reina", "Reinhold", "Remington", "Rene", "Renee", "Ressie", "Reta", "Retha", "Retta", "Reuben", "Reva", "Rex", "Rey", "Reyes", "Reymundo", "Reyna", "Reynold", "Rhea", "Rhett", "Rhianna", "Rhiannon", "Rhoda", "Ricardo", "Richard", "Richie", "Richmond", "Rick", "Rickey", "Rickie", "Ricky", "Rico", "Rigoberto", "Riley", "Rita", "River", "Robb", "Robbie", "Robert", "Roberta", "Roberto", "Robin", "Robyn", "Rocio", "Rocky", "Rod", "Roderick", "Rodger", "Rodolfo", "Rodrick", "Rodrigo", "Roel", "Rogelio", "Roger", "Rogers", "Rolando", "Rollin", "Roma", "Romaine", "Roman", "Ron", "Ronaldo", "Ronny", "Roosevelt", "Rory", "Rosa", "Rosalee", "Rosalia", "Rosalind", "Rosalinda", "Rosalyn", "Rosamond", "Rosanna", "Rosario", "Roscoe", "Rose", "Rosella", "Roselyn", "Rosemarie", "Rosemary", "Rosendo", "Rosetta", "Rosie", "Rosina", "Roslyn", "Ross", "Rossie", "Rowan", "Rowena", "Rowland", "Roxane", "Roxanne", "Roy", "Royal", "Royce", "Rozella", "Ruben", "Rubie", "Ruby", "Rubye", "Rudolph", "Rudy", "Rupert", "Russ", "Russel", "Russell", "Rusty", "Ruth", "Ruthe", "Ruthie", "Ryan", "Ryann", "Ryder", 
"Rylan", "Rylee", "Ryleigh", "Ryley", "Sabina", "Sabrina", "Sabryna", "Sadie", "Sadye", "Sage", "Saige", "Sallie", "Sally", "Salma", "Salvador", "Salvatore", "Sam", "Samanta", "Samantha", "Samara", "Samir", "Sammie", "Sammy", "Samson", "Sandra", "Sandrine", "Sandy", "Sanford", "Santa", "Santiago", "Santina", "Santino", "Santos", "Sarah", "Sarai", "Sarina", "Sasha", "Saul", "Savanah", "Savanna", "Savannah", "Savion", "Scarlett", "Schuyler", "Scot", "Scottie", "Scotty", "Seamus", "Sean", "Sebastian", "Sedrick", "Selena", "Selina", "Selmer", "Serena", "Serenity", "Seth", "Shad", "Shaina", "Shakira", "Shana", "Shane", "Shanel", "Shanelle", "Shania", "Shanie", "Shaniya", "Shanna", "Shannon", "Shanny", "Shanon", "Shany", "Sharon", "Shaun", "Shawn", "Shawna", "Shaylee", "Shayna", "Shayne", "Shea", "Sheila", "Sheldon", "Shemar", "Sheridan", "Sherman", "Sherwood", "Shirley", "Shyann", "Shyanne", "Sibyl", "Sid", "Sidney", "Sienna", "Sierra", "Sigmund", "Sigrid", "Sigurd", "Silas", "Sim", "Simeon", "Simone", "Sincere", "Sister", "Skye", "Skyla", "Skylar", "Sofia", "Soledad", "Solon", "Sonia", "Sonny", "Sonya", "Sophia", "Sophie", "Spencer", "Stacey", "Stacy", "Stan", "Stanford", "Stanley", "Stanton", "Stefan", "Stefanie", "Stella", "Stephan", "Stephania", "Stephanie", "Stephany", "Stephen", "Stephon", "Sterling", "Steve", "Stevie", "Stewart", "Stone", "Stuart", "Summer", "Sunny", "Susan", "Susana", "Susanna", "Susie", "Suzanne", "Sven", "Syble", "Sydnee", "Sydney", "Sydni", "Sydnie", "Sylvan", "Sylvester", "Sylvia", "Tabitha", "Tad", "Talia", "Talon", "Tamara", "Tamia", "Tania", "Tanner", "Tanya", "Tara", "Taryn", "Tate", "Tatum", "Tatyana", "Taurean", "Tavares", "Taya", "Taylor", "Teagan", "Ted", "Telly", "Terence", "Teresa", "Terrance", "Terrell", "Terrence", "Terrill", "Terry", "Tess", "Tessie", "Tevin", "Thad", "Thaddeus", "Thalia", "Thea", "Thelma", "Theo", "Theodora", "Theodore", "Theresa", "Therese", "Theresia", "Theron", "Thomas", "Thora", "Thurman", "Tia", "Tiana", 
"Tianna", "Tiara", "Tierra", "Tiffany", "Tillman", "Timmothy", "Timmy", "Timothy", "Tina", "Tito", "Titus", "Tobin", "Toby", "Tod", "Tom", "Tomas", "Tomasa", "Tommie", "Toney", "Toni", "Tony", "Torey", "Torrance", "Torrey", "Toy", "Trace", "Tracey", "Tracy", "Travis", "Travon", "Tre", "Tremaine", "Tremayne", "Trent", "Trenton", "Tressa", "Tressie", "Treva", "Trever", "Trevion", "Trevor", "Trey", "Trinity", "Trisha", "Tristian", "Tristin", "Triston", "Troy", "Trudie", "Trycia", "Trystan", "Turner", "Twila", "Tyler", "Tyra", "Tyree", "Tyreek", "Tyrel", "Tyrell", "Tyrese", "Tyrique", "Tyshawn", "Tyson", "Ubaldo", "Ulices", "Ulises", "Una", "Unique", "Urban", "Uriah", "Uriel", "Ursula", "Vada", "Valentin", "Valentina", "Valentine", "Valerie", "Vallie", "Van", "Vance", "Vanessa", "Vaughn", "Veda", "Velda", "Vella", "Velma", "Velva", "Vena", "Verda", "Verdie", "Vergie", "Verla", "Verlie", "Vern", "Verna", "Verner", "Vernice", "Vernie", "Vernon", "Verona", "Veronica", "Vesta", "Vicenta", "Vicente", "Vickie", "Vicky", "Victor", "Victoria", "Vida", "Vidal", "Vilma", "Vince", "Vincent", "Vincenza", "Vincenzo", "Vinnie", "Viola", "Violet", "Violette", "Virgie", "Virgil", "Virginia", "Virginie", "Vita", "Vito", "Viva", "Vivian", "Viviane", "Vivianne", "Vivien", "Vivienne", "Vladimir", "Wade", "Waino", "Waldo", "Walker", "Wallace", "Walter", "Walton", "Wanda", "Ward", "Warren", "Watson", "Wava", "Waylon", "Wayne", "Webster", "Weldon", "Wellington", "Wendell", "Wendy", "Werner", "Westley", "Weston", "Whitney", "Wilber", "Wilbert", "Wilburn", "Wiley", "Wilford", "Wilfred", "Wilfredo", "Wilfrid", "Wilhelm", "Wilhelmine", "Will", "Willa", "Willard", "William", "Willie", "Willis", "Willow", "Willy", "Wilma", "Wilmer", "Wilson", "Wilton", "Winfield", "Winifred", "Winnifred", "Winona", "Winston", "Woodrow", "Wyatt", "Wyman", "Xander", "Xavier", "Xzavier", "Yadira", "Yasmeen", "Yasmin", "Yasmine", "Yazmin", "Yesenia", "Yessenia", "Yolanda", "Yoshiko", "Yvette", "Yvonne", "Zachariah", 
"Zachary", "Zachery", "Zack", "Zackary", "Zackery", "Zakary", "Zander", "Zane", "Zaria", "Zechariah", "Zelda", "Zella", "Zelma", "Zena", "Zetta", "Zion", "Zita", "Zoe", "Zoey", "Zoie", "Zoila", "Zola", "Zora", "Zula"}, - "middle": {"Abdul", "Abdullah", "Abigail", "Ada", "Adam", "Adelaide", "Adele", "Adelina", "Adrian", "Adriana", "Agnes", "Agnolo", "Ahmed", "Aida", "Aileen", "Aimee", "Akilesh", "Akio", "Alan", "Alana", "Alejandro", "Alex", "Ali", "Alice", "Alicia", "Alina", "Alison", "Alita", "Allegretta", "Alonzo", "Alyssa", "Aman", "Amara", "Amelda", "Amelia", "Amenra", "Amina", "Amir", "Amitabh", "Amy", "Ana", "Anastasia", "André", "Andrea", "Andrei", "Andrew", "Andy", "Angel", "Angela", "Anita", "Ann", "Anna", "Anne", "Annette", "Anthony", "Antioco", "Antonio", "Arduino", "Aria", "Ariana", "Ariel", "Aris", "Arjun", "Armando", "Asha", "Ashton", "Asong", "Athena", "Audrey", "August", "Aura", "Aurelia", "Austen", "Ava", "Avery", "Avril", "Badru", "Bailey", "Bakul", "Baldwin", "Bao", "Barack", "Bear", "Beatrice", "Beau", "Belinda", "Bella", "Belle", "Ben", "Benjamin", "Bertha", "Beverly", "Bharati", "Bhoja", "Bhuma", "Bianca", "Bird", "Birdie", "Bishvajit", "Bjorn", "Blair", "Blake", "Blanca", "Bliss", "Blue", "Bo", "Bobbie", "Bonnie", "Boris", "Bradley", "Brandt", "Braulia", "Breck", "Bree", "Brett", "Brianna", "Bridget", "Brie", "Brielle", "Brittany", "Brizio", "Brook", "Brooke", "Brooks", "Bruce", "Bryce", "Bryn", "Brynn", "Burke", "Cajetan", "Calvin", "Cameron", "Camilla", "Candice", "Carla", "Carlos", "Carmen", "Caroline", "Carson", "Casey", "Cash", "Cassandra", "Cassidy", "Catherine", "Cecelia", "Cecilia", "Cedric", "Celeste", "Celia", "Celso", "Chahna", "Chance", "Chander", "Chandler", "Chang", "Charles", "Charlie", "Charlotte", "Chen", "Chintak", "Chloe", "Chris", "Christine", "Chung", "Cimeron", "Cindy", "Ciprianna", "Ciro", "Claire", "Clara", "Clarissa", "Clark", "Clarke", "Claude", "Claudia", "Clay", "Clementine", "Clint", "Cody", "Cole", "Colette", 
"Cora", "Cordelia", "Corey", "Corinne", "Cory", "Cosme", "Courtney", "Cree", "Crew", "Cynthia", "Cyprienne", "Cyrus", "Daan", "Dada", "Daisy", "Dakota", "Dale", "Damodar", "Dan", "Dana", "Dane", "Daniel", "Danielle", "Danveer", "Daphne", "Darla", "David", "Davide", "Dawn", "Dax", "Dean", "Deborah", "Delilah", "Denise", "Denver", "Deshal", "Deshawn", "Dev", "Devin", "Dhavala", "Diana", "Diane", "Diego", "Dmitri", "Dolores", "Dolorita", "Donato", "Dong", "Donna", "Donte", "Donya", "Dora", "Doris", "Dorothy", "Drake", "Drew", "Dru", "Dylan", "Ean", "Edith", "Eduardo", "Edward", "Eila", "Eileen", "Elaine", "Elda", "Eleanor", "Elena", "Eliana", "Elias", "Elise", "Eliza", "Elizabeth", "Ella", "Elle", "Ellen", "Ellie", "Ellis", "Eloise", "Elsa", "Elsie", "Em", "Emerson", "Emery", "Emilie", "Emilio", "Emily", "Emma", "Emmett", "Enrico", "Enrique", "Epifania", "Erica", "Erik", "Erin", "Eroica", "Esperanza", "Estelle", "Esther", "Etta", "Ettore", "Eva", "Evan", "Eve", "Evelyn", "Everett", "Faith", "Farid", "Faye", "Federico", "Felicity", "Felipe", "Felix", "Fern", "Fernando", "Finley", "Finn", "Fiona", "Fitz", "Flint", "Flora", "Florence", "Flynn", "Folke", "Fonzo", "Fox", "Frances", "Francis", "Francisco", "Francois", "François", "Frank", "Frankie", "Freya", "Fumio", "Fynn", "Gabriel", "Gabriella", "Gael", "Gage", "Gail", "Gemma", "Genevieve", "George", "Georgia", "Geraldine", "Giannino", "Ginetta", "Gioia", "Giselle", "Giuseppe", "Giustino", "Glenn", "Gloria", "Glory", "Grace", "Grant", "Gray", "Greer", "Greta", "Guido", "Guillermo", "Gulshan", "Gus", "Gwen", "Gyula", "Hank", "Hannah", "Hans", "Harley", "Harper", "Harriet", "Harrison", "Harshad", "Haruki", "Hayden", "Hayes", "Haze", "Hazel", "Heath", "Heather", "Hector", "Helen", "Helena", "Henry", "Hideki", "Hidetoshi", "Himesh", "Hiro", "Hiroaki", "Hirofumi", "Hirokazu", "Hiroshi", "Hiroto", "Hiroyuki", "Holly", "Honor", "Hope", "Hugh", "Hugo", "Hunter", "Ida", "Ignacio", "Imogen", "Ingrid", "Irene", "Iris", "Isaac", 
"Isabel", "Isabella", "Isabelle", "Ivan", "Ivy", "Jace", "Jack", "Jacqueline", "Jade", "Jaden", "Jae", "Jai", "Jaime", "Jamal", "James", "Jamie", "Jan", "Janak", "Jane", "Janet", "Janice", "Jasmine", "Jasper", "Javier", "Jax", "Jay", "Jayden", "Jayne", "Jean", "Jeanne", "Jed", "Jenna", "Jennifer", "Jesse", "Jessica", "Jill", "Jin", "Joan", "Joanna", "João", "Jocelyn", "Jodi", "Jody", "Joe", "Joey", "Johanna", "Johar", "John", "Jolene", "Jordan", "Jorge", "Jose", "José", "Joseph", "Josephine", "Josie", "Joy", "Joyce", "Juan", "Juanita", "Judd", "Jude", "Judith", "Jules", "Julia", "Julian", "Juliana", "Julianne", "Julie", "June", "Justine", "Kael", "Kai", "Kane", "Karen", "Kate", "Katherine", "Kathleen", "Kathryn", "Katie", "Katrina", "Kay", "Kayla", "Kazuki", "Keira", "Kelly", "Kelsey", "Kendall", "Kendra", "Kennedy", "Kent", "Kenta", "Kerry", "Khaled", "Khloe", "Kiara", "Kim", "Kimberly", "Kit", "Kiyoshi", "Klaus", "Knight", "Knox", "Koen", "Koi", "Koichi", "Koji", "Kolt", "Kristen", "Kristina", "Kurt", "Kwame", "Kye", "Kylie", "Lacey", "Laine", "Lake", "Lakshman", "Lalika", "Lane", "Lark", "Lars", "Laurel", "Layne", "Lee", "Leif", "Lennon", "Leo", "Leon", "Leslie", "Liam", "Liberty", "Lilian", "Lillian", "Lillie", "Link", "Liz", "Locke", "Logan", "Lona", "Lorena", "Lorenzo", "Lou", "Louise", "Love", "Lucia", "Lucy", "Luis", "Luiz", "Luke", "Lupita", "Lux", "Luz", "Lydia", "Lynn", "Mabel", "Mac", "Mack", "Mackenzie", "Madeline", "Madison", "Madona", "Mae", "Mael", "Makoto", "Manuel", "Manuela", "Maple", "Marc", "Marco", "Margaret", "Margo", "Margot", "Maria", "Mariano", "Maricela", "Marilyn", "Mario", "Mark", "Marley", "Mars", "Marti", "Mary", "Mason", "Matthew", "Mavis", "Max", "May", "Mazie", "Mei", "Melody", "Mercy", "Merle", "Micah", "Michael", "Miguel", "Mina", "Ming", "Mohamed", "Mollie", "Monroe", "Morgan", "Muhammad", "Musetta", "Myra", "Nadine", "Naomi", "Nardo", "Nat", "Natalie", "Neal", "Neil", "Nellie", "Nerola", "Nevada", "Neve", "Nikolai", "Niles", 
"Noel", "Nola", "Nora", "Nuru", "Oakley", "Olive", "Oliver", "Opal", "Orazio", "Ortensa", "Ortensia", "Osamu", "Oscar", "Otto", "Pablo", "Paige", "Pancho", "Paris", "Parker", "Pat", "Patrick", "Paul", "Pauli", "Pax", "Peace", "Pearl", "Pedro", "Penelope", "Penn", "Penny", "Peter", "Petra", "Peyton", "Phoenix", "Pierce", "Pierre", "Pilar", "Porter", "Praise", "Pratap", "Presley", "Priscilla", "Quinn", "Rachanna", "Radames", "Rae", "Rafael", "Rain", "Raine", "Ramiro", "Ramon", "Ramona", "Raphael", "Raul", "Ravi", "Ray", "Rayne", "Reagan", "Reece", "Reed", "Reese", "Rei", "Reid", "Reilly", "Remy", "Ren", "Reyes", "Rhodes", "Ricardo", "Richard", "Riley", "Rita", "River", "Rivera", "Roan", "Robert", "Roberto", "Robin", "Robt", "Rodrigo", "Roma", "Romelia", "Rory", "Rosa", "Rosalee", "Rosalie", "Rosalynn", "Rosario", "Rose", "Ross", "Rowan", "Ruben", "Ruby", "Rue", "Rush", "Russell", "Ruth", "Ryan", "Saad", "Saariq", "Sade", "Sadie", "Sagara", "Sage", "Saige", "Saint", "Salvadora", "Sam", "Samir", "Samuel", "Sante", "Santiago", "Sara", "Sasha", "Satoshi", "Scott", "Sean", "Sebastian", "Sergei", "Sergio", "Seth", "Shae", "Shai", "Shane", "Shannon", "Shashi", "Shaun", "Shawn", "Shawnee", "Shay", "Shea", "Shelby", "Shin", "Sidney", "Simon", "Sky", "Skye", "Skyler", "Sol", "Sophie", "Spencer", "Star", "Starr", "Stella", "Steve", "Stevie", "Storm", "Susan", "Sven", "Sybil", "Sydney", "Tahj", "Takashi", "Takeshi", "Taryn", "Tatum", "Taylor", "Teagan", "Terry", "Tess", "Thea", "Theodore", "Thomas", "Tilly", "Timothy", "Tosca", "Trent", "Tripp", "Tristan", "Truth", "Tyler", "Tyrone", "Uberto", "Ursus", "Val", "Vandelia", "Vaughn", "Vera", "Vernon", "Verona", "Vianna", "Victoria", "Vida", "Vieda", "Vince", "Vincent", "Violet", "Virginia", "Vivian", "Vladimir", "Wade", "Wayne", "Wes", "Wesley", "West", "Whitney", "Will", "Willa", "William", "Willie", "Winston", "Winter", "Wolf", "Wren", "Wynn", "Xavier", "Yasuo", "Yoel", "Yolanda", "Yoshi", "Yoshiaki", "Yoshihiro", "Yoshiki", 
"Yoshinori", "Yoshio", "Yusuf", "Yutaka", "Zain", "Zane", "Zayd", "Zelda", "Zeus", "Zev", "Zhang", "Zhen", "Zola", "Zora", "Zuni"}, - "last": {"Abbott", "Abernathy", "Abshire", "Adams", "Altenwerth", "Anderson", "Ankunding", "Armstrong", "Auer", "Aufderhar", "Bahringer", "Bailey", "Balistreri", "Barrows", "Bartell", "Bartoletti", "Barton", "Bashirian", "Batz", "Bauch", "Baumbach", "Bayer", "Beahan", "Beatty", "Bechtelar", "Becker", "Bednar", "Beer", "Beier", "Berge", "Bergnaum", "Bergstrom", "Bernhard", "Bernier", "Bins", "Blanda", "Blick", "Block", "Bode", "Boehm", "Bogan", "Bogisich", "Borer", "Bosco", "Botsford", "Boyer", "Boyle", "Bradtke", "Brakus", "Braun", "Breitenberg", "Brekke", "Brown", "Bruen", "Buckridge", "Carroll", "Carter", "Cartwright", "Casper", "Cassin", "Champlin", "Christiansen", "Cole", "Collier", "Collins", "Conn", "Connelly", "Conroy", "Considine", "Corkery", "Cormier", "Corwin", "Cremin", "Crist", "Crona", "Cronin", "Crooks", "Cruickshank", "Cummerata", "Cummings", "Dach", "Damore", "Daniel", "Dare", "Daugherty", "Davis", "Deckow", "Denesik", "Dibbert", "Dickens", "Dicki", "Dickinson", "Dietrich", "Donnelly", "Dooley", "Douglas", "Doyle", "DuBuque", "Durgan", "Ebert", "Effertz", "Eichmann", "Emard", "Emmerich", "Erdman", "Ernser", "Fadel", "Fahey", "Farrell", "Fay", "Feeney", "Feest", "Feil", "Ferry", "Fisher", "Flatley", "Frami", "Franecki", "Friesen", "Fritsch", "Funk", "Gaylord", "Gerhold", "Gerlach", "Gibson", "Gislason", "Gleason", "Gleichner", "Glover", "Goldner", "Goodwin", "Gorczany", "Gottlieb", "Goyette", "Grady", "Graham", "Grant", "Green", "Greenfelder", "Greenholt", "Grimes", "Gulgowski", "Gusikowski", "Gutkowski", "Gutmann", "Haag", "Hackett", "Hagenes", "Hahn", "Haley", "Halvorson", "Hamill", "Hammes", "Hand", "Hane", "Hansen", "Harber", "Harris", "Hartmann", "Harvey", "Hauck", "Hayes", "Heaney", "Heathcote", "Hegmann", "Heidenreich", "Heller", "Herman", "Hermann", "Hermiston", "Herzog", "Hessel", "Hettinger", "Hickle", 
"Hilll", "Hills", "Hilpert", "Hintz", "Hirthe", "Hodkiewicz", "Hoeger", "Homenick", "Hoppe", "Howe", "Howell", "Hudson", "Huel", "Huels", "Hyatt", "Jacobi", "Jacobs", "Jacobson", "Jakubowski", "Jaskolski", "Jast", "Jenkins", "Jerde", "Jewess", "Johns", "Johnson", "Johnston", "Jones", "Kassulke", "Kautzer", "Keebler", "Keeling", "Kemmer", "Kerluke", "Kertzmann", "Kessler", "Kiehn", "Kihn", "Kilback", "King", "Kirlin", "Klein", "Kling", "Klocko", "Koch", "Koelpin", "Koepp", "Kohler", "Konopelski", "Koss", "Kovacek", "Kozey", "Krajcik", "Kreiger", "Kris", "Kshlerin", "Kub", "Kuhic", "Kuhlman", "Kuhn", "Kulas", "Kunde", "Kunze", "Kuphal", "Kutch", "Kuvalis", "Labadie", "Lakin", "Lang", "Langosh", "Langworth", "Larkin", "Larson", "Leannon", "Lebsack", "Ledner", "Leffler", "Legros", "Lehner", "Lemke", "Lesch", "Leuschke", "Lind", "Lindgren", "Littel", "Little", "Lockman", "Lowe", "Lubowitz", "Lueilwitz", "Luettgen", "Lynch", "Macejkovic", "Maggio", "Mann", "Mante", "Marks", "Marquardt", "Marvin", "Mayer", "Mayert", "McClure", "McCullough", "McDermott", "McGlynn", "McKenzie", "McLaughlin", "Medhurst", "Mertz", "Metz", "Miller", "Mills", "Mitchell", "Moen", "Mohr", "Monahan", "Moore", "Morar", "Morissette", "Mosciski", "Mraz", "Mueller", "Muller", "Murazik", "Murphy", "Murray", "Nader", "Nicolas", "Nienow", "Nikolaus", "Nitzsche", "Nolan", "Oberbrunner", "Okuneva", "Olson", "Ondricka", "OReilly", "Orn", "Ortiz", "Osinski", "Pacocha", "Padberg", "Pagac", "Parisian", "Parker", "Paucek", "Pfannerstill", "Pfeffer", "Pollich", "Pouros", "Powlowski", "Predovic", "Price", "Prohaska", "Prosacco", "Purdy", "Quigley", "Quitzon", "Rath", "Ratke", "Rau", "Raynor", "Reichel", "Reichert", "Reilly", "Reinger", "Rempel", "Renner", "Reynolds", "Rice", "Rippin", "Ritchie", "Robel", "Roberts", "Rodriguez", "Rogahn", "Rohan", "Rolfson", "Romaguera", "Roob", "Rosenbaum", "Rowe", "Ruecker", "Runolfsdottir", "Runolfsson", "Runte", "Russel", "Rutherford", "Ryan", "Sanford", "Satterfield", 
"Sauer", "Sawayn", "Schaden", "Schaefer", "Schamberger", "Schiller", "Schimmel", "Schinner", "Schmeler", "Schmidt", "Schmitt", "Schneider", "Schoen", "Schowalter", "Schroeder", "Schulist", "Schultz", "Schumm", "Schuppe", "Schuster", "Senger", "Shanahan", "Shields", "Simonis", "Sipes", "Skiles", "Smith", "Smitham", "Spencer", "Spinka", "Sporer", "Stamm", "Stanton", "Stark", "Stehr", "Steuber", "Stiedemann", "Stokes", "Stoltenberg", "Stracke", "Streich", "Stroman", "Strosin", "Swaniawski", "Swift", "Terry", "Thiel", "Thompson", "Tillman", "Torp", "Torphy", "Towne", "Toy", "Trantow", "Tremblay", "Treutel", "Tromp", "Turcotte", "Turner", "Ullrich", "Upton", "Vandervort", "Veum", "Volkman", "Von", "VonRueden", "Waelchi", "Walker", "Walsh", "Walter", "Ward", "Waters", "Watsica", "Weber", "Wehner", "Weimann", "Weissnat", "Welch", "West", "White", "Wiegand", "Wilderman", "Wilkinson", "Will", "Williamson", "Willms", "Windler", "Wintheiser", "Wisoky", "Wisozk", "Witting", "Wiza", "Wolf", "Wolff", "Wuckert", "Wunsch", "Wyman", "Yost", "Yundt", "Zboncak", "Zemlak", "Ziemann", "Zieme", "Zulauf"}, - "hobby": {"3D printing", "Acrobatics", "Acting", "Amateur radio", "Animation", "Aquascaping", "Astrology", "Astronomy", "Baking", "Baton twirling", "Blogging", "Building", "Board/tabletop games", "Book discussion clubs", "Book restoration", "Bowling", "Brazilian jiu-jitsu", "Breadmaking", "Bullet journaling", "Cabaret", "Calligraphy", "Candle making", "Candy making", "Car fixing & building", "Card games", "Cheesemaking", "Cleaning", "Clothesmaking", "Coffee roasting", "Collecting", "Coloring", "Computer programming", "Confectionery", "Cooking", "Cosplaying", "Couponing", "Craft", "Creative writing", "Crocheting", "Cross-stitch", "Crossword puzzles", "Cryptography", "Cue sports", "Dance", "Digital arts", "Distro Hopping", "DJing", "Do it yourself", "Drama", "Drawing", "Drink mixing", "Drinking", "Electronic games", "Electronics", "Embroidery", "Experimenting", "Fantasy sports", 
"Fashion", "Fashion design", "Fishkeeping", "Filmmaking", "Flower arranging", "Fly tying", "Foreign language learning", "Furniture building", "Gaming", "Genealogy", "Gingerbread house making", "Glassblowing", "Graphic design", "Gunsmithing", "Gymnastics", "Hacking", "Herp keeping", "Home improvement", "Homebrewing", "Houseplant care", "Hula hooping", "Humor", "Hydroponics", "Ice skating", "Jewelry making", "Jigsaw puzzles", "Journaling", "Juggling", "Karaoke", "Karate", "Kendama", "Knife making", "Knitting", "Knot tying", "Kombucha brewing", "Lace making", "Lapidary", "Leather crafting", "Lego building", "Lock picking", "Listening to music", "Listening to podcasts", "Machining", "Macrame", "Magic", "Makeup", "Mazes (indoor/outdoor)", "Metalworking", "Model building", "Model engineering", "Nail art", "Needlepoint", "Origami", "Painting", "Palmistry", "Pet adoption & fostering", "Philately", "Photography", "Practical jokes", "Pressed flower craft", "Playing musical instruments", "Poi", "Pottery", "Powerlifting", "Puzzles", "Quilling", "Quilting", "Quizzes", "Radio-controlled model", "Rail transport modeling", "Rapping", "Reading", "Refinishing", "Reiki", "Robot combat", "Rubik's Cube", "Scrapbooking", "Sculpting", "Sewing", "Shoemaking", "Singing", "Sketching", "Skipping rope", "Slot car", "Soapmaking", "Social media", "Spreadsheets", "Stand-up comedy", "Stamp collecting", "Table tennis", "Tarot", "Taxidermy", "Thrifting", "Video editing", "Video game developing", "Video gaming", "Watching movies", "Watching television", "Videography", "Virtual reality", "Waxing", "Weaving", "Weight training", "Welding", "Whittling", "Wikipedia editing", "Winemaking", "Wood carving", "Woodworking", "Worldbuilding", "Writing", "Word searches", "Yo-yoing", "Yoga", "Zumba", "Amusement park visiting", "Air sports", "Airsoft", "Amateur geology", "Archery", "Astronomy", "Backpacking", "Badminton", "BASE jumping", "Baseball", "Basketball", "Beekeeping", "Birdwatching", "Blacksmithing", 
"BMX", "Board sports", "Bodybuilding", "Bonsai", "Butterfly watching", "Bus riding", "Camping", "Canoeing", "Canyoning", "Car riding", "Caving", "Composting", "Cycling", "Dowsing", "Driving", "Farming", "Fishing", "Flag football", "Flower growing", "Flying", "Flying disc", "Foraging", "Fossicking", "Freestyle football", "Gardening", "Geocaching", "Ghost hunting", "Gold prospecting", "Graffiti", "Handball", "Herbalism", "Herping", "High-power rocketry", "Hiking", "Hobby horsing", "Hobby tunneling", "Hooping", "Horseback riding", "Hunting", "Inline skating", "Jogging", "Jumping rope", "Kayaking", "Kite flying", "Kitesurfing", "Lacrosse", "LARPing", "Letterboxing", "Longboarding", "Martial arts", "Metal detecting", "Meteorology", "Motor sports", "Mountain biking", "Mountaineering", "Museum visiting", "Mushroom hunting", "Netball", "Nordic skating", "Orienteering", "Paintball", "Parkour", "Photography", "Podcast hosting", "Polo", "Public transport riding", "Rafting", "Railway journeys", "Rappelling", "Road biking", "Rock climbing", "Roller skating", "Rugby", "Running", "Radio-controlled model", "Sailing", "Sand art", "Scouting", "Scuba diving", "Sculling", "Shooting", "Shopping", "Shuffleboard", "Skateboarding", "Skiing", "Skimboarding", "Skydiving", "Slacklining", "Snowboarding", "Snowmobiling", "Snowshoeing", "Soccer", "Stone skipping", "Sun bathing", "Surfing", "Survivalism", "Swimming", "Taekwondo", "Tai chi", "Tennis", "Topiary", "Tourism", "Thru-hiking", "Trade fair visiting", "Travel", "Urban exploration", "Vacation", "Vegetable farming", "Videography", "Vehicle restoration", "Walking", "Water sports", "Astronomy", "Biology", "Chemistry", "Electrochemistry", "Physics", "Psychology", "Sports science", "Geography", "History", "Mathematics", "Railway studies", "Action figure", "Antiquing", "Ant-keeping", "Art collecting", "Book collecting", "Button collecting", "Cartophily", "Coin collecting", "Comic book collecting", "Deltiology", "Die-cast toy", "Digital 
hoarding", "Dolls", "Element collecting", "Ephemera collecting", "Fusilately", "Knife collecting", "Lotology", "Movie and movie memorabilia collecting", "Fingerprint collecting", "Perfume", "Phillumeny", "Radio-controlled model", "Rail transport modelling", "Record collecting", "Rock tumbling", "Scutelliphily", "Shoes", "Slot car", "Sports memorabilia", "Stamp collecting", "Stuffed toy collecting", "Tea bag collecting", "Ticket collecting", "Toys", "Transit map collecting", "Video game collecting", "Vintage cars", "Vintage clothing", "Vinyl Records", "Antiquities", "Auto audiophilia", "Flower collecting and pressing", "Fossil hunting", "Insect collecting", "Magnet fishing", "Metal detecting", "Mineral collecting", "Rock balancing", "Sea glass collecting", "Seashell collecting", "Stone collecting", "Animal fancy", "Axe throwing", "Backgammon", "Badminton", "Baton twirling", "Beauty pageants", "Billiards", "Bowling", "Boxing", "Bridge", "Checkers (draughts)", "Cheerleading", "Chess", "Color guard", "Cribbage", "Curling", "Dancing", "Darts", "Debate", "Dominoes", "Eating", "Esports", "Fencing", "Go", "Gymnastics", "Ice hockey", "Ice skating", "Judo", "Jujitsu", "Kabaddi", "Knowledge/word games", "Laser tag", "Longboarding", "Mahjong", "Marbles", "Martial arts", "Model United Nations", "Poker", "Pool", "Role-playing games", "Shogi", "Slot car racing", "Speedcubing", "Sport stacking", "Table football", "Table tennis", "Volleyball", "Weightlifting", "Wrestling", "Airsoft", "Archery", "Association football", "Australian rules football", "Auto racing", "Baseball", "Beach volleyball", "Breakdancing", "Climbing", "Cricket", "Croquet", "Cycling", "Disc golf", "Dog sport", "Equestrianism", "Exhibition drill", "Field hockey", "Figure skating", "Fishing", "Footbag", "Frisbee", "Golfing", "Handball", "Horseback riding", "Horseshoes", "Iceboat racing", "Jukskei", "Kart racing", "Knife throwing", "Lacrosse", "Longboarding", "Long-distance running", "Marching band", "Model 
aircraft", "Orienteering", "Pickleball", "Quidditch", "Race walking", "Racquetball", "Radio-controlled car racing", "Roller derby", "Rugby league football", "Sculling", "Shooting sport", "Skateboarding", "Skiing", "Sled dog racing", "Softball", "Speed skating", "Squash", "Surfing", "Swimming", "Table tennis", "Tennis", "Tennis polo", "Tether car", "Tour skating", "Tourism", "Trapshooting", "Triathlon", "Ultimate frisbee", "Volleyball", "Water polo", "Fishkeeping", "Learning", "Meditation", "Microscopy", "Reading", "Research", "Shortwave listening", "Audiophile", "Aircraft spotting", "Amateur astronomy", "Birdwatching", "Bus spotting", "Geocaching", "Gongoozling", "Herping", "Hiking", "Meteorology", "Photography", "Satellite watching", "Trainspotting", "Whale watching"}, - "phone": {"###-###-####", "(###)###-####", "1-###-###-####", "###.###.####"}, + "prefix": { + "Mr.", "Mrs.", "Ms.", "Miss", "Dr.", + }, + "suffix": { + "Jr.", "Sr.", "I", "II", "III", + "IV", "V", "MD", "DDS", "PhD", + "DVM", + }, + "first": { + "Aaliyah", "Aaron", "Abbey", "Abbie", "Abby", + "Abdul", "Abdullah", "Abe", "Abel", "Abigail", + "Abigale", "Abigayle", "Abner", "Abraham", "Ada", + "Adah", "Adalberto", "Adaline", "Adam", "Adan", + "Addie", "Addison", "Adela", "Adelbert", "Adele", + "Adelia", "Adeline", "Adell", "Adella", "Adelle", + "Aditya", "Adonis", "Adrain", "Adrian", "Adriana", + "Adriel", "Afton", "Agnes", "Agustin", "Agustina", + "Ahmad", "Ahmed", "Aida", "Aidan", "Aiden", + "Aileen", "Aimee", "Aisha", "Aiyana", "Al", + "Alaina", "Alan", "Alana", "Alanis", "Alanna", + "Alayna", "Alba", "Albert", "Alberta", "Albertha", + "Alberto", "Albin", "Albina", "Alda", "Alden", + "Alec", "Aleen", "Alejandra", "Alek", "Alena", + "Alene", "Alessandra", "Alessandro", "Alessia", "Aletha", + "Alex", "Alexa", "Alexander", "Alexandra", "Alexie", + "Alexis", "Alf", "Alfonso", "Alfonzo", "Alford", + "Alfred", "Ali", "Alia", "Alice", "Alicia", + "Alisa", "Alisha", "Alison", "Alivia", "Aliya", + 
"Aliyah", "Aliza", "Alize", "Allen", "Allene", + "Allie", "Allison", "Ally", "Alphonso", "Alta", + "Althea", "Alva", "Alvah", "Alvena", "Alvera", + "Alverta", "Alvina", "Alvis", "Alyce", "Alycia", + "Alysa", "Alysha", "Alyson", "Alysson", "Amalia", + "Amanda", "Amaya", "Amber", "Amelia", "America", + "Americo", "Amie", "Amina", "Amir", "Amira", + "Amiya", "Amos", "Amparo", "Amy", "Ana", + "Anahi", "Anais", "Anastasia", "Anderson", "Andre", + "Andrew", "Andy", "Angel", "Angela", "Angelica", + "Angelina", "Angeline", "Angelo", "Angie", "Angus", + "Anibal", "Anika", "Anissa", "Anita", "Aniya", + "Aniyah", "Anjali", "Anna", "Annabel", "Annalise", + "Anne", "Annie", "Ansel", "Ansley", "Anthony", + "Antonio", "Anya", "April", "Ara", "Araceli", + "Aracely", "Archibald", "Ardella", "Arden", "Ardith", + "Arely", "Ari", "Ariane", "Arianna", "Aric", + "Ariel", "Arielle", "Arjun", "Arlene", "Arlie", + "Arlo", "Armand", "Armando", "Arnaldo", "Arne", + "Arno", "Arnold", "Arnoldo", "Arnulfo", "Aron", + "Art", "Arthur", "Arvel", "Arvid", "Arvilla", + "Aryanna", "Asa", "Asha", "Ashlee", "Ashleigh", + "Ashley", "Ashly", "Ashlynn", "Ashton", "Ashtyn", + "Asia", "Astrid", "Athena", "Aubree", "Aubrey", + "Audie", "Audra", "Audrey", "August", "Augusta", + "Augustine", "Augustus", "Aurelia", "Austen", "Austin", + "Autumn", "Ava", "Avery", "Avis", "Axel", + "Ayana", "Ayden", "Ayla", "Bailey", "Barbara", + "Barney", "Baron", "Barrett", "Barry", "Bart", + "Barton", "Baylee", "Beatrice", "Beau", "Bell", + "Bella", "Belle", "Ben", "Benedict", "Benjamin", + "Bennett", "Benton", "Bernadette", "Bernadine", "Bernard", + "Bernardo", "Bernice", "Bernie", "Berry", "Bert", + "Berta", "Bertha", "Beryl", "Bessie", "Beth", + "Bethany", "Betty", "Beulah", "Beverly", "Bianka", + "Bill", "Billie", "Billy", "Birdie", "Blair", + "Blaise", "Blake", "Blanca", "Blanche", "Blaze", + "Bo", "Bobbie", "Bobby", "Bonita", "Bonnie", + "Boris", "Boyd", "Brad", "Braden", "Bradley", + "Brady", "Braeden", "Brandi", 
"Brando", "Brandon", + "Brandt", "Brandy", "Brant", "Braxton", "Breana", + "Breanna", "Breanne", "Brenda", "Brendan", "Brenden", + "Brendon", "Brenna", "Brennan", "Brennon", "Brent", + "Bret", "Brett", "Bria", "Brian", "Brice", + "Bridget", "Brittany", "Brock", "Broderick", "Brody", + "Brook", "Brooke", "Brooklyn", "Brooks", "Bruce", + "Bryana", "Bryce", "Brycen", "Bryon", "Buck", + "Bud", "Buddy", "Buford", "Bulah", "Burley", + "Buster", "Cade", "Caden", "Caesar", "Caitlyn", + "Cale", "Caleb", "Caleigh", "Cali", "Calista", + "Callie", "Camden", "Cameron", "Camila", "Camilla", + "Camille", "Camren", "Camron", "Camryn", "Candace", + "Candice", "Candida", "Cara", "Carey", "Carissa", + "Carlee", "Carleton", "Carley", "Carli", "Carlie", + "Carlo", "Carlos", "Carlotta", "Carmel", "Carmela", + "Carmella", "Carmelo", "Carmen", "Carmine", "Carol", + "Carole", "Carolina", "Caroline", "Carolyn", "Carrie", + "Carroll", "Carson", "Carter", "Cary", "Casandra", + "Casey", "Casper", "Cassandra", "Cassandre", "Cassidy", + "Cassie", "Catalina", "Caterina", "Catherine", "Cathy", + "Cayla", "Cecelia", "Cecil", "Cecile", "Cecilia", + "Cedrick", "Celestine", "Celia", "Celine", "Cesar", + "Chad", "Chadd", "Chadrick", "Chaim", "Chance", + "Chandler", "Chanel", "Chanelle", "Charity", "Charlene", + "Charles", "Charley", "Charlie", "Charlotte", "Chase", + "Chasity", "Chauncey", "Chaya", "Chaz", "Chelsea", + "Chelsey", "Chelsie", "Chester", "Chet", "Cheyanne", + "Cheyenne", "Chloe", "Chris", "Christa", "Christian", + "Christiana", "Christina", "Christine", "Christophe", "Christopher", + "Christy", "Chyna", "Ciara", "Cicero", "Cielo", + "Cierra", "Cindy", "Clair", "Claire", "Clara", + "Clare", "Clarissa", "Clark", "Claud", "Claude", + "Claudia", "Claudie", "Claudine", "Clay", "Clemens", + "Clement", "Clementina", "Clementine", "Cleo", "Cleora", + "Cleta", "Cleve", "Cleveland", "Clifford", "Clifton", + "Clint", "Clinton", "Clovis", "Clyde", "Coby", + "Cody", "Colby", "Cole", "Coleman", 
"Colin", + "Colleen", "Collin", "Colt", "Colten", "Colton", + "Columbus", "Concepcion", "Conner", "Connie", "Connor", + "Conor", "Conrad", "Constance", "Constantin", "Consuelo", + "Cooper", "Cora", "Coralie", "Corbin", "Cordelia", + "Cordell", "Cordia", "Cordie", "Corene", "Corine", + "Cornelius", "Cornell", "Corrine", "Cortez", "Cortney", + "Cory", "Coty", "Courtney", "Coy", "Craig", + "Crawford", "Cristian", "Cristina", "Cruz", "Crystal", + "Crystel", "Cullen", "Curt", "Curtis", "Cydney", + "Cynthia", "Cyril", "Cyrus", "Dahlia", "Daija", + "Daisha", "Daisy", "Dakota", "Dale", "Dallas", + "Dallin", "Dalton", "Damaris", "Dameon", "Damian", + "Damien", "Damion", "Damon", "Dan", "Dana", + "Dandre", "Dane", "Danny", "Dante", "Danyka", + "Daphne", "Darby", "Daren", "Darian", "Dariana", + "Darien", "Dario", "Darion", "Darius", "Darlene", + "Daron", "Darrel", "Darrell", "Darren", "Darrick", + "Darrin", "Darrion", "Darron", "Darryl", "Darwin", + "Daryl", "Dave", "David", "Davin", "Dawn", + "Dawson", "Dax", "Dayana", "Dayna", "Dayne", + "Dayton", "Dean", "Deangelo", "Deanna", "Deborah", + "Declan", "Dee", "Deja", "Delaney", "Delfina", + "Delia", "Delilah", "Dell", "Della", "Delores", + "Dena", "Denis", "Dennis", "Deon", "Dereck", + "Derek", "Derick", "Deron", "Derrick", "Desiree", + "Desmond", "Dessie", "Destiny", "Devin", "Devon", + "Dewayne", "Dexter", "Diamond", "Diana", "Dianna", + "Diego", "Dillon", "Dimitri", "Dina", "Dino", + "Dion", "Dixie", "Dock", "Dolly", "Dolores", + "Domingo", "Dominic", "Dominique", "Don", "Donald", + "Donna", "Dora", "Dorian", "Doris", "Dorothea", + "Dorothy", "Dorris", "Doug", "Douglas", "Dovie", + "Doyle", "Drake", "Drew", "Duane", "Dudley", + "Dulce", "Duncan", "Dustin", "Dusty", "Dwight", + "Dylan", "Earl", "Earlene", "Earline", "Earnest", + "Earnestine", "Easter", "Easton", "Ebony", "Ed", + "Eda", "Edd", "Eddie", "Eden", "Edgar", + "Edgardo", "Edison", "Edmond", "Edmund", "Edna", + "Eduardo", "Edward", "Edwin", "Edwina", "Effie", + 
"Eileen", "Elaina", "Elbert", "Eleanora", "Eleanore", + "Elena", "Eli", "Elias", "Elijah", "Elisa", + "Elisabeth", "Elise", "Elizabeth", "Ella", "Ellen", + "Ellie", "Elliot", "Elliott", "Ellis", "Elmer", + "Elmore", "Eloise", "Elsa", "Elsie", "Elton", + "Elvis", "Elyse", "Elyssa", "Emanuel", "Emerson", + "Emery", "Emil", "Emile", "Emilia", "Emilie", + "Emilio", "Emily", "Emma", "Emmanuel", "Emmanuelle", + "Emmet", "Emmett", "Emmie", "Emmy", "Emory", + "Ena", "Enid", "Enoch", "Enos", "Enrico", + "Enrique", "Eric", "Erica", "Ericka", "Erik", + "Erika", "Erin", "Erna", "Ernest", "Ernie", + "Ervin", "Esmeralda", "Esperanza", "Esteban", "Estefania", + "Estella", "Estelle", "Esther", "Estrella", "Ethan", + "Ethel", "Eugene", "Eunice", "Eva", "Evan", + "Evangeline", "Eve", "Evelyn", "Everett", "Ezekiel", + "Ezequiel", "Ezra", "Fabian", "Fae", "Fannie", + "Fanny", "Fatima", "Fay", "Faye", "Federico", + "Felicia", "Felicity", "Felipe", "Felix", "Fernando", + "Fidel", "Finn", "Fiona", "Fletcher", "Flo", + "Florence", "Floyd", "Ford", "Forest", "Forrest", + "Frances", "Francesca", "Francesco", "Francis", "Francisco", + "Franco", "Frank", "Frankie", "Franz", "Fred", + "Freda", "Freddie", "Freddy", "Frederic", "Frederick", + "Fredrick", "Freeman", "Gabe", "Gabriel", "Gabriella", + "Gabrielle", "Gage", "Gail", "Garfield", "Garland", + "Garret", "Garrett", "Garrick", "Garrison", "Garry", + "Garth", "Gaston", "Gavin", "Gay", "Gayle", + "Gaylord", "Gene", "Genesis", "Genevieve", "Gennaro", + "Geoffrey", "George", "Gerald", "Gerard", "Gerardo", + "Gerry", "Gia", "Gideon", "Gilbert", "Gilberto", + "Giles", "Gillian", "Gina", "Gino", "Giovani", + "Giovanna", "Giovanni", "Giovanny", "Gladys", "Glen", + "Glenda", "Gloria", "Gordon", "Grace", "Gracie", + "Graciela", "Grady", "Graham", "Grant", "Grayson", + "Greg", "Gregg", "Gregory", "Greta", "Griffin", + "Guadalupe", "Guido", "Gunnar", "Gunner", "Gustave", + "Guy", "Gwen", "Hadley", "Hailee", "Hailey", + "Hailie", "Hal", "Haley", 
"Halle", "Hallie", + "Hanna", "Hannah", "Hans", "Hardy", "Harley", + "Harmon", "Harmony", "Harold", "Harrison", "Harry", + "Harvey", "Hattie", "Hayden", "Haylee", "Hayley", + "Haylie", "Hazel", "Heath", "Heather", "Hector", + "Heidi", "Helen", "Helena", "Helene", "Hellen", + "Henri", "Henry", "Herbert", "Herman", "Hilda", + "Hillary", "Hiram", "Hollis", "Holly", "Hope", + "Horace", "Houston", "Howard", "Hubert", "Hudson", + "Hugh", "Humberto", "Hunter", "Ian", "Ibrahim", + "Ida", "Ignacio", "Ike", "Imani", "Ines", + "Irma", "Isaac", "Isabel", "Isabell", "Isabella", + "Isabelle", "Isaiah", "Israel", "Ivy", "Izabella", + "Jabari", "Jace", "Jack", "Jackie", "Jacklyn", + "Jackson", "Jacky", "Jaclyn", "Jacquelyn", "Jada", + "Jade", "Jaden", "Jadon", "Jadyn", "Jaida", + "Jaiden", "Jaime", "Jairo", "Jake", "Jakob", + "Jalen", "Jamaal", "Jamal", "Jamar", "Jameson", + "Jamie", "Jamison", "Jan", "Jana", "Jane", + "Janelle", "Janet", "Janice", "Janie", "Janis", + "Jaqueline", "Jared", "Jaron", "Jarred", "Jarrod", + "Jarvis", "Jasmin", "Jason", "Jasper", "Javier", + "Jay", "Jayda", "Jayden", "Jaydon", "Jayme", + "Jayne", "Jean", "Jeanette", "Jeanne", "Jeff", + "Jeffery", "Jeffrey", "Jena", "Jennie", "Jennifer", + "Jerald", "Jeremy", "Jerome", "Jerrold", "Jerry", + "Jess", "Jesse", "Jessica", "Jessie", "Jessy", + "Jesus", "Jillian", "Jimmie", "Jimmy", "Jo", + "Joan", "Joana", "Joanie", "Joanne", "Joannie", + "Joanny", "Joany", "Joaquin", "Jocelyn", "Jodie", + "Jody", "Joe", "Joel", "Joelle", "Joey", + "Johanna", "John", "Johnathan", "Johnathon", "Johnny", + "Jon", "Jonas", "Jonathan", "Jonathon", "Jordan", + "Jordyn", "Jorge", "Jose", "Josefa", "Josefina", + "Joseph", "Josephine", "Josh", "Joshua", "Josiah", + "Josie", "Josue", "Jovan", "Joy", "Joyce", + "Juanita", "Jude", "Judge", "Judy", "Jules", + "Julia", "Julian", "Juliana", "Julie", "Juliet", + "Julio", "Julius", "June", "Justice", "Justina", + "Justine", "Kacey", "Kade", "Kaden", "Kaia", + "Kailee", "Kailey", "Kailyn", 
"Kaitlin", "Kaitlyn", + "Kale", "Kaleb", "Kaley", "Kali", "Kallie", + "Kameron", "Kane", "Kara", "Kareem", "Karelle", + "Karen", "Kari", "Kariane", "Karianne", "Karina", + "Karine", "Karl", "Karlee", "Karley", "Karli", + "Karlie", "Karson", "Kasandra", "Kasey", "Kassandra", + "Katelyn", "Katelynn", "Katherine", "Katheryn", "Kathleen", + "Kathryn", "Katrina", "Kattie", "Kay", "Kaya", + "Kayla", "Kaylee", "Kayleigh", "Kayley", "Keanu", + "Keegan", "Keeley", "Keira", "Keith", "Kelli", + "Kellie", "Kelly", "Kelvin", "Ken", "Kendall", + "Kendra", "Kendrick", "Kennedy", "Kenneth", "Kenny", + "Kenton", "Kenya", "Kevin", "Kiana", "Kianna", + "Kiara", "Kiera", "Kiley", "Kim", "Kimberly", + "King", "Kip", "Kira", "Kirk", "Kitty", + "Kobe", "Kody", "Kolby", "Kole", "Kory", + "Kris", "Krista", "Kristian", "Kristin", "Kristina", + "Kristofer", "Kristoffer", "Kristopher", "Kristy", "Krystal", + "Kurt", "Kurtis", "Kyla", "Kyle", "Kylie", + "Kyra", "Lacey", "Lacy", "Laila", "Lance", + "Landen", "Lane", "Laney", "Larry", "Laura", + "Laurel", "Lauren", "Laurie", "Lawrence", "Lawson", + "Layla", "Layne", "Lea", "Leann", "Leanna", + "Leanne", "Lee", "Leif", "Leila", "Leilani", + "Lela", "Leland", "Lenny", "Leo", "Leon", + "Leonard", "Lesley", "Leslie", "Lester", "Leta", + "Levi", "Lew", "Lewis", "Lexi", "Lexie", + "Lia", "Liam", "Liana", "Libby", "Lila", + "Lilian", "Lilla", "Lillian", "Lillie", "Lilly", + "Lily", "Lina", "Lincoln", "Linda", "Lindsay", + "Lindsey", "Lionel", "Lisa", "Lisette", "Liza", + "Lizzie", "Lloyd", "Logan", "Lois", "Lola", + "Lolita", "London", "Lonny", "Loren", "Lorena", + "Lorenzo", "Lori", "Louie", "Louisa", "Lowell", + "Lucas", "Luciano", "Lucie", "Lucienne", "Lucy", + "Luis", "Luisa", "Lukas", "Luna", "Lupe", + "Luther", "Luz", "Lydia", "Lyla", "Lynn", + "Mabel", "Mable", "Mac", "Macey", "Maci", + "Macie", "Mack", "Mackenzie", "Macy", "Madaline", + "Madalyn", "Maddison", "Madeline", "Madelyn", "Madelynn", + "Madge", "Madie", "Madilyn", "Madisen", 
"Madison", + "Madisyn", "Madonna", "Madyson", "Mae", "Maegan", + "Maeve", "Maggie", "Maia", "Malachi", "Malcolm", + "Mallory", "Mandy", "Manuel", "Manuela", "Mara", + "Marc", "Marcel", "Marcella", "Marcia", "Marco", + "Marcos", "Marcus", "Margaret", "Marge", "Margie", + "Margot", "Maria", "Mariah", "Mariana", "Marianne", + "Marie", "Marina", "Mario", "Marion", "Marjorie", + "Mark", "Marlee", "Marlene", "Marley", "Marlon", + "Marta", "Martin", "Marty", "Marvin", "Mary", + "Matt", "Maureen", "Maurice", "Mavis", "Max", + "Maxime", "Maxine", "Maxwell", "May", "Maya", + "Maynard", "Mayra", "Meagan", "Meaghan", "Meghan", + "Melisa", "Melissa", "Mellie", "Melody", "Melvin", + "Meredith", "Merle", "Mia", "Micaela", "Micah", + "Michael", "Michaela", "Michel", "Michele", "Michelle", + "Miguel", "Mikayla", "Mike", "Milan", "Miles", + "Millie", "Milo", "Milton", "Mina", "Minerva", + "Misty", "Mitchell", "Mohamed", "Mohammad", "Mohammed", + "Moises", "Molly", "Mona", "Monica", "Monique", + "Monte", "Morgan", "Morris", "Moses", "Moshe", + "Muhammad", "Muriel", "Mustafa", "Myles", "Myrna", + "Myron", "Myrtle", "Nadia", "Nannie", "Naomi", + "Nash", "Nasir", "Nat", "Natalie", "Natasha", + "Nathan", "Nathanael", "Nathaniel", "Neal", "Ned", + "Nedra", "Neha", "Neil", "Nellie", "Neva", + "Nicholas", "Nichole", "Nick", "Nicole", "Nikolas", + "Nina", "Noah", "Noel", "Nola", "Nolan", + "Nora", "Norma", "Nova", "Octavia", "Olga", + "Oliver", "Opal", "Ophelia", "Oscar", "Otis", + "Otto", "Owen", "Pablo", "Paige", "Pamela", + "Paris", "Parker", "Patience", "Patricia", "Patrick", + "Paul", "Paula", "Pauline", "Payton", "Pearl", + "Pedro", "Penelope", "Perry", "Pete", "Peter", + "Philip", "Phoebe", "Phyllis", "Pierce", "Pierre", + "Piper", "Polly", "Preston", "Priscilla", "Quincy", + "Quinn", "Rachael", "Rachel", "Rachelle", "Rae", + "Raegan", "Rafael", "Rafaela", "Ralph", "Ramon", + "Ramona", "Randall", "Randy", "Raphael", "Raquel", + "Ray", "Raymond", "Reagan", "Reba", "Rebecca", + 
"Reggie", "Reginald", "Reid", "Reilly", "Reina", + "Rene", "Renee", "Reuben", "Ricardo", "Richard", + "Richie", "Rick", "Ricky", "Rico", "Rigoberto", + "Riley", "Rita", "River", "Robb", "Robbie", + "Robert", "Roberta", "Roberto", "Robin", "Robyn", + "Rocky", "Roderick", "Roger", "Roman", "Ron", + "Rory", "Rosa", "Rose", "Rowan", "Roxane", + "Roxanne", "Roy", "Royal", "Royce", "Ruben", + "Ruby", "Rudy", "Russell", "Ruth", "Ryan", + "Sabina", "Sabrina", "Sadie", "Sage", "Sally", + "Sam", "Samantha", "Sammy", "Samson", "Sandra", + "Sandy", "Santiago", "Sarah", "Sarai", "Sasha", + "Saul", "Savanna", "Savannah", "Scarlett", "Sean", + "Sebastian", "Selena", "Serena", "Seth", "Shane", + "Shanna", "Shannon", "Sharon", "Shaun", "Shawn", + "Shawna", "Shea", "Sheila", "Sheldon", "Shirley", + "Sidney", "Sienna", "Sierra", "Silas", "Simone", + "Sofia", "Sonia", "Sonya", "Sophia", "Sophie", + "Stanley", "Stella", "Stephanie", "Stephen", "Steve", + "Susan", "Sydney", "Sylvia", "Talia", "Tamara", + "Tanner", "Tanya", "Taylor", "Ted", "Terence", + "Teresa", "Terry", "Theodore", "Thomas", "Tiffany", + "Timothy", "Tina", "Toby", "Tom", "Tomas", + "Tony", "Travis", "Troy", "Tyler", "Tyson", + "Valerie", "Vanessa", "Vaughn", "Vernon", "Veronica", + "Vicente", "Victor", "Victoria", "Vince", "Vincent", + "Violet", "Virginia", "Vivian", "Wayne", "Wendy", + "Will", "Willa", "Willard", "William", "Winston", + "Yolanda", "Zachary", "Zane", "Zoe", "Zoey", + "Zola", + }, + "middle": { + "Abdul", "Abdullah", "Ada", "Adam", "Adele", + "Adrian", "Agnes", "Ahmed", "Aida", "Aileen", + "Aimee", "Alan", "Alex", "Ali", "Alice", + "Alicia", "Amelia", "Amina", "Amir", "Amy", + "Ana", "Anastasia", "Andrea", "Andrei", "Andrew", + "Angel", "Angela", "Anita", "Anna", "Anne", + "Anthony", "Antonio", "Aria", "Ariana", "Armando", + "Ashton", "Audrey", "August", "Aura", "Aurelia", + "Ava", "Avery", "Bailey", "Beatrice", "Belinda", + "Belle", "Ben", "Benjamin", "Bertha", "Beverly", + "Bianca", "Blake", "Blanca", 
"Bonnie", "Boris", + "Bradley", "Brandt", "Brett", "Brooks", "Bruce", + "Burke", "Calvin", "Camilla", "Carlos", "Carmen", + "Caroline", "Carson", "Casey", "Cassandra", "Cassidy", + "Catherine", "Cecilia", "Cedric", "Celia", "Chance", + "Chandler", "Chang", "Charles", "Charlotte", "Chen", + "Chloe", "Chris", "Christine", "Claire", "Clara", + "Clarissa", "Claude", "Cody", "Cynthia", "Dan", + "Dana", "Dane", "Daniel", "Daphne", "David", + "Denver", "Diana", "Dolores", "Drake", "Edith", + "Eduardo", "Edward", "Eileen", "Eleanor", "Elena", + "Elias", "Elizabeth", "Ella", "Ellen", "Elsa", + "Emery", "Emily", "Emma", "Emmett", "Enrique", + "Epifania", "Erik", "Erin", "Esperanza", "Estelle", + "Esther", "Eva", "Evan", "Eve", "Evelyn", + "Finn", "Francois", "Gabriel", "Gael", "Gail", + "George", "Grace", "Hannah", "Hazel", "Helen", + "Henry", "Ignacio", "Isaac", "Isabella", "Jaime", + "James", "Jane", "Jean", "Joan", "Jose", + "Julia", "Julian", "Katherine", "Kristen", "Kristina", + "Kurt", "Leo", "Mabel", "Manuel", "Margaret", + "Maria", "Mario", "Max", "Rosa", "Rose", + "Ruth", + }, + "last": { + "Abbott", "Abernathy", "Abshire", "Adams", "Allen", + "Anderson", "Armstrong", "Ashley", "Atkins", "Atkinson", + "Auer", "Austin", "Bailey", "Baker", "Baldwin", + "Ball", "Banks", "Barber", "Barker", "Barnes", + "Barnett", "Barrows", "Bartell", "Barton", "Bates", + "Bayer", "Beahan", "Beatty", "Becker", "Bell", + "Bennett", "Berge", "Bergstrom", "Bernhard", "Berry", + "Bird", "Black", "Blake", "Bogan", "Boyer", + "Boyle", "Bradley", "Brady", "Braun", "Brekke", + "Brooks", "Brown", "Bryant", "Burns", "Burton", + "Butler", "Byrd", "Campbell", "Carpenter", "Carr", + "Carroll", "Carter", "Cartwright", "Casper", "Castillo", + "Chapman", "Chen", "Christiansen", "Clark", "Clay", + "Clayton", "Cole", "Coleman", "Collier", "Collins", + "Connelly", "Conroy", "Considine", "Cook", "Cooper", + "Cormier", "Cox", "Craig", "Crawford", "Cronin", + "Cruz", "Cummings", "Cunningham", "Daniel", 
"Daniels", + "Daugherty", "Davidson", "Davies", "Davis", "Dean", + "Deckow", "Diaz", "Dickinson", "Dietrich", "Dixon", + "Donnelly", "Dooley", "Douglas", "Doyle", "Duncan", + "Dunn", "Edwards", "Elliott", "Ellis", "Evans", + "Farrell", "Feeney", "Ferguson", "Fernandez", "Fields", + "Fisher", "Flatley", "Fleming", "Fletcher", "Flores", + "Ford", "Foster", "Fox", "Franklin", "Frazier", + "Freeman", "Friesen", "Fuller", "Garcia", "Gardner", + "Garrett", "Garrison", "Garza", "George", "Gerhold", + "Gerlach", "Gibbs", "Gibson", "Gilbert", "Gill", + "Gislason", "Gleason", "Gleichner", "Glenn", "Glover", + "Goldner", "Gomez", "Gonzalez", "Goodwin", "Gordon", + "Gottlieb", "Grady", "Graham", "Grant", "Graves", + "Gray", "Green", "Greene", "Gregory", "Griffin", + "Grimes", "Gross", "Guzman", "Hackett", "Hahn", + "Hale", "Haley", "Hall", "Hamilton", "Hammond", + "Hansen", "Hanson", "Harper", "Harrington", "Harris", + "Harrison", "Hart", "Hartmann", "Harvey", "Hawkins", + "Hayes", "Heller", "Henderson", "Henry", "Herman", + "Hermann", "Hernandez", "Herrera", "Herzog", "Hicks", + "Higgins", "Hill", "Hills", "Hoffman", "Holland", + "Holmes", "Holt", "Hopkins", "Horton", "Howard", + "Howe", "Howell", "Hubbard", "Hudson", "Hughes", + "Hunt", "Hunter", "Ingram", "Jackson", "Jacobi", + "Jacobs", "Jacobson", "James", "Jenkins", "Jensen", + "Jimenez", "Johnson", "Johnston", "Jones", "Jordan", + "Joseph", "Keller", "Kelly", "Kennedy", "Kessler", + "Kim", "King", "Klein", "Kling", "Knight", + "Koch", "Kuhn", "Lamb", "Lambert", "Lane", + "Lang", "Larkin", "Larson", "Lawrence", "Lawson", + "Lee", "Leffler", "Leonard", "Lewis", "Li", + "Lin", "Lind", "Lindgren", "Little", "Lopez", + "Love", "Lowe", "Lucas", "Lynch", "Lyons", + "Maldonado", "Malone", "Mann", "Manning", "Marks", + "Marquardt", "Marsh", "Marshall", "Martin", "Martinez", + "Marvin", "Mason", "Matthews", "Maxwell", "May", + "Mayer", "Mccarthy", "McClure", "McCullough", "McDermott", + "Mcdonald", "McKenzie", "McLaughlin", 
"Medina", "Mendez", + "Mendoza", "Mertz", "Meyer", "Miles", "Miller", + "Mills", "Mitchell", "Monahan", "Montgomery", "Moore", + "Morales", "Moreno", "Morgan", "Morris", "Morrison", + "Mueller", "Muller", "Murphy", "Murray", "Myers", + "Nash", "Navarro", "Nelson", "Newman", "Newton", + "Nguyen", "Nichols", "Nicolas", "Nolan", "Norman", + "Norris", "Oliver", "Olson", "Ortega", "Ortiz", + "Osinski", "Owen", "Owens", "Padilla", "Palmer", + "Parker", "Patel", "Patterson", "Paul", "Payne", + "Pearson", "Pena", "Perez", "Perkins", "Perry", + "Peters", "Peterson", "Phillips", "Pierce", "Porter", + "Powell", "Powers", "Price", "Purdy", "Quigley", + "Quinn", "Ramirez", "Ramos", "Ramsey", "Rath", + "Ray", "Raynor", "Reed", "Reese", "Reid", + "Reilly", "Reyes", "Reynolds", "Rhodes", "Rice", + "Richards", "Richardson", "Riley", "Ritchie", "Rivera", + "Robbins", "Roberts", "Robertson", "Robinson", "Rodriguez", + "Rogers", "Romero", "Rose", "Rosenbaum", "Ross", + "Rowe", "Ruiz", "Russell", "Rutherford", "Ryan", + "Sanchez", "Sanders", "Sandoval", "Santiago", "Santos", + "Satterfield", "Sauer", "Saunders", "Schaefer", "Schiller", + "Schmidt", "Schmitt", "Schneider", "Schroeder", "Schultz", + "Schuster", "Schwartz", "Scott", "Sharp", "Shaw", + "Shelton", "Sherman", "Shields", "Silva", "Simmons", + "Simpson", "Sims", "Singh", "Singleton", "Smith", + "Snyder", "Solomon", "Soto", "Sparks", "Spencer", + "Stanley", "Stanton", "Stark", "Stephens", "Stevens", + "Stevenson", "Stewart", "Stokes", "Stone", "Strong", + "Sullivan", "Summers", "Sutton", "Swanson", "Swift", + "Tate", "Taylor", "Terry", "Thiel", "Thomas", + "Thompson", "Thornton", "Tillman", "Todd", "Torres", + "Towne", "Townsend", "Tran", "Tucker", "Turner", + "Tyler", "Valdez", "Valencia", "Vargas", "Vasquez", + "Vaughn", "Vega", "Velazquez", "Villarreal", "Vincent", + "Wade", "Wagner", "Walker", "Wall", "Wallace", + "Walsh", "Walter", "Walters", "Walton", "Wang", + "Ward", "Warren", "Washington", "Waters", "Watkins", + 
"Watson", "Watts", "Weaver", "Webb", "Weber", + "Webster", "Welch", "Wells", "West", "Wheeler", + "White", "Whitney", "Wiggins", "Wilcox", "Wiley", + "Wilkins", "Wilkinson", "Williams", "Williamson", "Willis", + "Wilson", "Wise", "Wolf", "Wolfe", "Wolff", + "Wong", "Wood", "Woods", "Wright", "Wu", + "Wyatt", "Wyman", "Yang", "Yates", "Yost", + "Young", "Zamora", "Zimmerman", + }, + "hobby": { + "3D Printing", "Acrobatics", "Acting", "Air Sports", "Aircraft Spotting", + "Airsoft", "Amateur Astronomy", "Amateur Geology", "Amateur Radio", "Animation", + "Antiquing", "Archery", "Art Collecting", "Astrology", "Astronomy", + "Auto Racing", "Axe Throwing", "Backgammon", "Backpacking", "Badminton", + "Baking", "Base Jumping", "Baseball", "Basketball", "Beach Volleyball", + "Beauty Pageants", "Beekeeping", "Billiards", "Birdwatching", "Blacksmithing", + "Blogging", "BMX", "Board Sports", "Board/Tabletop Games", "Bodybuilding", + "Bonsai", "Book Collecting", "Bowling", "Boxing", "Brazilian Jiu-Jitsu", + "Breadmaking", "Breakdancing", "Bridge", "Butterfly Watching", "Calligraphy", + "Camping", "Candle Making", "Candy Making", "Canoeing", "Canyoning", + "Car Fixing & Building", "Card Games", "Caving", "Checkers", "Cheerleading", + "Cheesemaking", "Chess", "Climbing", "Clothesmaking", "Coffee Roasting", + "Coin Collecting", "Coloring", "Comic Book Collecting", "Computer Programming", "Confectionery", + "Cooking", "Cosplaying", "Couponing", "Creative Writing", "Cribbage", + "Cricket", "Crocheting", "Croquet", "Cross-Stitch", "Crossword Puzzles", + "Curling", "Cycling", "Dance", "Darts", "Debate", + "Digital Arts", "Disc Golf", "DJing", "Do It Yourself", "Dolls", + "Dominoes", "Drama", "Drawing", "Drink Mixing", "Electronics", + "Embroidery", "Equestrianism", "Esports", "Fantasy Sports", "Farming", + "Fashion", "Fashion Design", "Fencing", "Field Hockey", "Figure Skating", + "Filmmaking", "Fishing", "Fishkeeping", "Flag Football", "Flower Arranging", + "Flower Collecting", 
"Flower Growing", "Fly Tying", "Flying", "Flying Disc", + "Foraging", "Foreign Language Learning", "Fossil Hunting", "Frisbee", "Furniture Building", + "Gardening", "Genealogy", "Geocaching", "Ghost Hunting", "Gingerbread House Making", + "Gold Prospecting", "Golfing", "Graffiti", "Graphic Design", "Gymnastics", + "Hacking", "Handball", "Herbalism", "Hiking", "Home Improvement", + "Homebrewing", "Hooping", "Horseback Riding", "Houseplant Care", "Hula Hooping", + "Hunting", "Hydroponics", "Ice Hockey", "Ice Skating", "Inline Skating", + "Jewelry Making", "Jigsaw Puzzles", "Jogging", "Journaling", "Judo", + "Juggling", "Jujitsu", "Jumping Rope", "Karaoke", "Karate", + "Kayaking", "Kendama", "Kite Flying", "Kitesurfing", "Knitting", + "Lacrosse", "LARPing", "Laser Tag", "Leather Crafting", "Lego Building", + "Long-Distance Running", "Longboarding", "Macrame", "Magic", "Mahjong", + "Makeup", "Marbles", "Marching Band", "Martial Arts", "Meditation", + "Metal Detecting", "Model Aircraft", "Model Building", "Motor Sports", "Mountain Biking", + "Mountaineering", "Mushroom Hunting", "Nail Art", "Needlepoint", "Orienteering", + "Origami", "Paintball", "Painting", "Parkour", "Photography", + "Pickleball", "Playing Musical Instruments", "Podcast Hosting", "Poi", "Poker", + "Pool", "Pottery", "Powerlifting", "Practical Jokes", "Pressed Flower Craft", + "Puzzles", "Quilling", "Quilting", "Quizzes", "Racquetball", + "Radio-Controlled Car Racing", "Radio-Controlled Model", "Rafting", "Rappelling", "Rapping", + "Reading", "Record Collecting", "Road Biking", "Rock Climbing", "Role-Playing Games", + "Roller Derby", "Roller Skating", "Rubik's Cube", "Rugby", "Running", + "Sailing", "Sand Art", "Scouting", "Scrapbooking", "Scuba Diving", + "Sculpting", "Sewing", "Shooting", "Shooting Sport", "Shuffleboard", + "Singing", "Skateboarding", "Sketching", "Skiing", "Skimboarding", + "Skipping Rope", "Skydiving", "Slacklining", "Slot Car", "Slot Car Racing", + "Snowboarding", "Snowmobiling", 
"Snowshoeing", "Soapmaking", "Soccer", + "Softball", "Speed Skating", "Sports Memorabilia", "Squash", "Stamp Collecting", + "Stand-Up Comedy", "Stone Skipping", "Sun Bathing", "Surfing", "Survivalism", + "Swimming", "Table Tennis", "Taekwondo", "Tai Chi", "Tarot", + "Tennis", "Thrifting", "Thru-Hiking", "Travel", "Triathlon", + "Ultimate Frisbee", "Vegetable Farming", "Video Editing", "Video Game Collecting", "Video Gaming", + "Videography", "Vintage Cars", "Vintage Clothing", "Vinyl Records", "Virtual Reality", + "Volleyball", "Walking", "Water Polo", "Water Sports", "Waxing", + "Weaving", "Weight Training", "Weightlifting", "Welding", "Whittling", + "Winemaking", "Wood Carving", "Woodworking", "Word Searches", "Wrestling", + "Writing", "Yo-Yoing", "Yoga", "Zumba", + }, + "ethnicity": { + "African", "African American", "Albanian", "American", "Arab", + "Armenian", "Asian", "Australian", "Austrian", "Belgian", + "Brazilian", "British", "Bulgarian", "Canadian", "Caribbean", + "Chinese", "Croatian", "Czech", "Danish", "Dutch", + "English", "Filipino", "Finnish", "French", "German", + "Greek", "Hispanic", "Hungarian", "Indian", "Indonesian", + "Iranian", "Irish", "Italian", "Japanese", "Jewish", + "Korean", "Latino", "Mexican", "Middle Eastern", "Native American", + "Norwegian", "Pakistani", "Polish", "Portuguese", "Romanian", + "Russian", "Scandinavian", "Scottish", "Slovak", "South Asian", + "Southeast Asian", "Spanish", "Swedish", "Swiss", "Thai", + "Turkish", "Ukrainian", "Vietnamese", "Welsh", "West Indian", + }, + "phone": { + "###-###-####", "(###)###-####", "1-###-###-####", "###.###.####", + }, + "social_media": { + // Generic handles + "{gamertag}", + "@{gamertag}", + + // Social Websites + "https://x.com/{gamertag}", + "https://instagram.com/{gamertag}", + "https://linkedin.com/in/{gamertag}", + "https://github.com/{gamertag}", + "https://tiktok.com/@{gamertag}", + "https://facebook.com/{gamertag}", + }, + // Bio contains sentence templates for personal 
biographies + // phrased to avoid article/plural pitfalls and work with Generate + "bio": { + // short punchy + "Started in {city} as {jobtitle}, now in {productcategory}", + "{programminglanguage} developer with a side path in {productcategory}", + "Former {jobtitle} now focused on {productcategory}", + "Born in {country}, moved into {jobtitle} after studying {programminglanguage}", + + // hipster flavor + "{jobtitle} into {hipsterword} and {hipsterword}, based in {city}", + "Known for a {hipsterword} aesthetic and work in {country} as {jobtitle}", + "{jobtitle} with a taste for {hipsterword} culture in {city}", + "From {country}, brought {hipsterword} vibes to {city} while working in {jobtitle}", + + // beer + "{jobtitle} by day, brewing {beerstyle} after hours in {city}", + "Known for a {beerhop} IPA recipe and a career in {country} as {jobtitle}", + "Started brewing {beerstyle} in {city}, later specializing in {productcategory} as {jobtitle}", + "{jobtitle} who experiments with {beeryeast} fermentation on weekends", + + // cars + "{jobtitle} who drives a {carmaker} {carmodel} and trains in {hobby} around {city}", + "Collector of {carmaker} models and working in {country} as {jobtitle}", + "{jobtitle} with a soft spot for {carfueltype} builds and hands-on {hobby} in {city}", + "Restored a {carmaker} {carmodel} and shifted into {jobtitle}", + + // movies + "{jobtitle} obsessed with {moviegenre} cinema and hands-on {hobby} in {city}", + "Known for deep dives into {moviegenre} and a career in {country} as {jobtitle}", + "{jobtitle} who studies {moviegenre} and practices {hobby} after work", + + // music + "{jobtitle} who codes to {songgenre} playlists in {city}", + "Workdays in {country} as {jobtitle}, nights with {songgenre} sessions", + "{jobtitle} blending {songgenre} jams with weekend {hobby}", + + // celebrity + "{jobtitle} inspired by {celebrityactor}, active with {hobby} in {city}", + "Takes cues from {celebritybusiness} playbooks while working in {country} 
as {jobtitle}", + "{jobtitle} who follows {celebritysport} and keeps up daily practice in {hobby}", + + // books + "{jobtitle} who reads {bookgenre} and keeps up with {hobby} in {city}", + "Career in {country} as {jobtitle}, library stacked with {bookgenre}", + "Moved into {jobtitle} after reading {booktitle} by {bookauthor}", + + // school + "{jobtitle} with roots at {school}, now active in {hobby} around {city}", + "Known for a {school} background and work in {country} as {jobtitle}", + "{jobtitle} with a {school} degree and a habit of {verb} {noun} in {city}", + + // pets + "{jobtitle} who cares about {animaltype} welfare and keeps up with {hobby}", + "Career in {country} as {jobtitle}, home life includes {petname}", + "{jobtitle} who volunteers with {animaltype} groups on weekends", + + // color and aesthetic + "{jobtitle} drawn to {color} palettes and practical {hobby} in {city}", + "Recognized in {country} for a {color} aesthetic and work as {jobtitle}", + "{jobtitle} focused on {productfeature} with an eye for {color} design", + + // medium mixed + "From {city}, discovered {hobby} while working as {jobtitle} at {company}", + "{jobtitle} by day and {hobby} enthusiast by night, currently in {country}", + "After {number:5,30} years in {city}, shifted focus to {productcategory}", + "Known for {verb} {noun} and practical {productcategory} work", + + // story-ish + "From {city} to {country}, the path in {jobtitle} involved steady {verb} {noun} and {hobby}", + "Studied {programminglanguage} in {city} and built a name in {productcategory}", + "Moved into {jobtitle} after years of hands-on {hobby} across {country}", + + // creative combos + "What began as {hobby} in {city} led to work in {productfeature} as {jobtitle}", + "Between {verb} {noun} and {hobby}, developed a steady path in {jobtitle} around {country}", + "{jobtitle} who studies {animal} behavior for fun and iterates on {noun} for work", + + // compact one-liners + "{jobtitle} by trade, {hobby} by choice", 
+ "Born in {country}, works in {productcategory}", + "{programminglanguage} roots with a focus on {productfeature}", + "From {city}, building in {productcategory} and keeping up with {hobby}", + + // professional focus + "Specializes in {productcategory} as {jobtitle} in {city}", + "{jobtitle} with {programminglanguage} experience and ongoing {hobby}", + "Works in {productcategory} and keeps a steady practice in {hobby} around {country}", + + // lifestyle crossovers + "Brews {beerstyle} while streaming {songgenre}, day job in {city} as {jobtitle}", + "Known for a {hipsterword} look and a shelf of {color} {animal} art", + "{jobtitle} with a {carmaker} {carmodel} and a taste for {moviegenre}", + "Reads {booktitle} by {bookauthor} and applies lessons in {jobtitle}", + + // safe oddities + "{jobtitle} exploring {language} study and practical {hobby} in {city}", + "From {country}, building a path in {jobtitle} with {productcategory} focus", + "{programminglanguage} practitioner mixing {hobby} and {productfeature}", + }, } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/product.go b/vendor/github.com/brianvoe/gofakeit/v7/data/product.go index cbe5b3a5..c4b96efc 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/product.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/product.go @@ -2,98 +2,380 @@ package data var Product = map[string][]string{ "category": { - "electronics", "clothing", "home appliances", "furniture", - "automotive parts", "beauty and personal care", "books", "sports equipment", - "toys and games", "outdoor gear", "pet supplies", "kitchenware", - "health and wellness", "tools and hardware", "office supplies", - "baby products", "jewelry", "home decor", "musical instruments", - "fitness equipment", "mobile phones", "computer accessories", "cameras and photography", - "gardening supplies", "bedding and linens", "food and groceries", "party supplies", - "craft and diy supplies", "camping gear", "watches", "luggage and travel accessories", 
- "board games", "art supplies", "stationery", "bath and shower products", - "sunglasses", "educational toys", "headphones and earbuds", "sneakers and athletic shoes", - "coffee and tea products", "bicycles and accessories", "cookware", "cosmetics", - "home improvement", "pet food", "laptop bags and cases", "home security systems", - "musical accessories", "skincare products", "smart home devices", + "accessories", "air quality products", "appliances", "art supplies", "audio equipment", + "automotive accessories", "automotive parts", "baby products", "bath and shower products", + "bathroom accessories", "bathroom fixtures", "bedding and linens", "beverage accessories", + "bicycles and accessories", "board games", "books", "cables and connectors", + "cameras and photography", "camping gear", "car accessories", "care products", + "charging accessories", "cleaning supplies", "clothing", "clothing accessories", + "coffee and tea products", "computer accessories", "cookware", "cosmetics", + "craft and diy supplies", "decorative accessories", "dining furniture", "educational toys", + "electronics", "entertainment systems", "exercise equipment", "fashion accessories", + "fitness equipment", "food preparation", "furniture", "gaming accessories", + "gaming equipment", "gardening supplies", "gift items", "hair care products", + "health and wellness", "heating and cooling", "home accessories", "home automation", + "home decor", "home improvement", "home organization", "home security systems", + "household essentials", "jewelry", "kitchen accessories", "kitchen appliances", + "kitchen tools", "kitchenware", "laptop bags and cases", "lighting", + "luggage and travel accessories", "makeup and cosmetics", "massage and relaxation", + "mobile phones", "musical accessories", "musical instruments", "office accessories", + "office furniture", "office supplies", "outdoor furniture", "outdoor gear", + "party supplies", "personal care", "pet accessories", "pet food", + "pet supplies", 
"power accessories", "safety and security", "seasonal decorations", + "skincare products", "smart home devices", "sneakers and athletic shoes", + "sports equipment", "storage and organization", "tableware", "tools and hardware", + "toys and games", "travel accessories", "utensils", "video equipment", + "watches", "water accessories", "window treatments", "wireless accessories", }, "adjective": { - "bold", "swift", "pure", "smart", "fresh", - "cool", "sharp", "zen", "bright", "quick", - "robust", "sleek", "versatile", "innovative", "compact", - "luxe", "modular", "precision", "stream", + "advanced", "aesthetic", "agile", "all-in-one", "alluring", + "amazing", "ambitious", "ancient", "artistic", "athletic", + "attractive", "authentic", "automatic", "awesome", "balanced", + "beautiful", "best-selling", "bold", "brilliant", "bright", + "brisk", "budget-friendly", "calm", "capable", "champion", + "charming", "classic", "clean", "clever", "colorful", + "comfortable", "compact", "complete", "cool", "creative", + "crisp", "custom", "cutting-edge", "dazzling", "delicate", + "dependable", "designed", "digital", "durable", "dynamic", + "efficient", "elegant", "elite", "enduring", "energetic", + "enhanced", "essential", "exceptional", "exclusive", "expert", + "extraordinary", "fabulous", "fast", "fashionable", "favorite", + "feature-rich", "fine", "first-class", "flexible", "fresh", + "friendly", "genuine", "gifted", "glamorous", "glossy", + "graceful", "great", "handcrafted", "handy", "harmonious", + "high-end", "high-performance", "high-quality", "high-tech", "highly-rated", + "honest", "ideal", "impressive", "improved", "incredible", + "indispensable", "innovative", "inspiring", "integrated", "intelligent", + "intense", "intuitive", "invaluable", "invincible", "irresistible", + "lasting", "leading", "legendary", "lightweight", "luxe", + "luxurious", "magical", "magnificent", "majestic", "masterful", + "mighty", "minimalist", "modern", "modular", "multi-functional", + 
"natural", "neat", "new", "next-generation", "noble", + "optimized", "outstanding", "perfect", "performance", "polished", + "portable", "powerful", "practical", "precise", "precision", + "premium", "professional", "pure", "quality", "quick", + "quiet", "radiant", "rapid", "reliable", "remarkable", + "resilient", "responsive", "revolutionary", "robust", "rugged", + "seamless", "secure", "sensitive", "sharp", "sleek", + "smart", "smooth", "sophisticated", "special", "spectacular", + "speed", "splendid", "stainless", "stunning", "stream", + "streamlined", "strong", "sturdy", "stylish", "super", + "superior", "supreme", "swift", "tactical", "tasteful", + "technological", "terrific", "thrilling", "timeless", "top-rated", + "tough", "traditional", "transparent", "trendy", "trusted", + "ultimate", "ultra", "unbeatable", "unique", "universal", + "unmatched", "upgraded", "valuable", "versatile", "vibrant", + "victorious", "vigorous", "virtual", "vital", "zen", }, "name": { - "phone", "laptop", "tablet", "watch", "camera", - "headphones", "speaker", "drone", "car", "bike", - "appliance", "gadget", "tool", "toy", "game", - "computer", "console", "smartwatch", "fitness tracker", "smart home device", - "robot", "router", "television", "smart speaker", "vr headset", - "earbuds", "printer", "mouse", "keyboard", "monitor", - "microwave", "blender", "vacuum", "fan", "toaster", - "clock", "lamp", "shaver", "scale", "thermometer", - "fridge", "oven", "mixer", "iron", "hair dryer", - "fan", "scale", "thermostat", "router", "lightbulb", + "adapter", "air conditioner", "air fryer", "air purifier", "alarm", + "alarm clock", "alarm system", "amplifier", "antenna", "apron", + "appliance", "armchair", "artificial intelligence device", "ashtray", "audio system", + "backpack", "bag", "baggage", "balcony", "ball", + "band", "barbecue", "barometer", "barstool", "basket", + "bath mat", "bathroom scale", "bathroom vanity", "bathtub", "battery", + "beach chair", "beach towel", "bed", "bed frame", 
"bedding", + "bedside table", "bench", "bicycle", "bidet", "bike", + "bin", "binoculars", "blanket", "blender", "blinds", + "bluetooth device", "board", "body scale", "book", "bookcase", + "bookend", "bookmark", "boot", "bottle", "bottle opener", + "bowl", "box", "bracelet", "bread maker", "briefcase", + "broom", "brush", "bucket", "bug zapper", "bulb", + "bumper", "bundle", "bunk bed", "burner", "cable", + "cabinet", "caddy", "calculator", "calendar", "camera", + "camping chair", "camping gear", "can opener", "candle", "candle holder", + "canister", "canopy", "car", "card", "card reader", + "carpet", "carrier", "cart", "case", "cash register", + "cassette player", "cast iron pan", "cat bed", "cat tree", "cd player", + "ceiling fan", "chair", "chandelier", "charger", "charging dock", + "charging pad", "charging station", "chest", "chest of drawers", "china cabinet", + "chip", "chopsticks", "christmas lights", "cigar cutter", "cigarette lighter", + "circuit breaker", "clamp", "clapper", "cleaning brush", "cleaning kit", + "clock", "closet", "clothes hanger", "clothesline", "clothing rack", + "coaster", "coat rack", "coffee grinder", "coffee maker", "coffee table", + "coffee warmer", "coin sorter", "colander", "comb", "comforter", + "compact disc", "compass", "computer", "console", "container", + "controller", "cookie jar", "cooker", "cooking pot", "cooking timer", + "cooler", "cork", "corkboard", "corkscrew", "corner shelf", + "cosmetic bag", "couch", "counter", "cover", "cradle", + "craft kit", "crayon", "credit card reader", "crock pot", "crystal", + "cup", "cupboard", "curtain", "curtain rod", "cushion", + "cutting board", "dartboard", "decorative pillow", "dehumidifier", "desk", + "desk lamp", "desk organizer", "detector", "dice", "dimmer", + "dining chair", "dining table", "dish", "dish rack", "dishwasher", + "display", "divider", "docking station", "dog bed", "dog bowl", + "door", "doorbell", "doorknob", "doormat", "doorstop", + "drawer", "dresser", "drill", 
"drink coaster", "drink dispenser", + "drone", "dry erase board", "dryer", "dumbbell", "duster", + "dv player", "dvd player", "earbuds", "earphones", "earplugs", + "egg timer", "electric blanket", "electric can opener", "electric kettle", "electric shaver", + "electric toothbrush", "emergency light", "envelope", "eraser", "espresso machine", + "ethernet cable", "exercise ball", "exercise bike", "exercise mat", "extension cord", + "eyeglasses", "eyeglasses case", "face mask", "fan", "faucet", + "fence", "file", "file cabinet", "filter", "fire alarm", + "fire extinguisher", "fireplace", "fireplace screen", "first aid kit", "fitness tracker", + "flash drive", "flashlight", "flat screen tv", "flip phone", "floor lamp", + "floor mat", "flooring", "flower pot", "fluorescent light", "fly swatter", + "food processor", "foot massager", "footrest", "frame", "freezer", + "fridge", "fruit bowl", "frying pan", "furnace", "game", + "game console", "gaming chair", "gaming desk", "gaming headset", "gaming keyboard", + "gaming mouse", "gaming monitor", "garage door opener", "garbage can", "garbage disposal", + "garden hose", "garden tool", "gate", "gauge", "generator", + "gift box", "gift card", "gift wrap", "glass", "glasses", + "glove", "glue", "goggles", "golf bag", "gps device", + "grater", "grill", "grill pan", "guitar", "guitar pick", + "hair brush", "hair clip", "hair dryer", "hair straightener", "hammer", + "handbag", "handheld vacuum", "hanger", "hard drive", "hat", + "headband", "headphones", "headrest", "heater", "heating pad", + "helmet", "herb garden", "high chair", "hinge", "hockey stick", + "hood", "hook", "hose", "humidifier", "ice bucket", + "ice cream maker", "ice maker", "ice pack", "ice tray", "id card", + "ink", "ink cartridge", "insect repellent", "iron", "ironing board", + "jack", "jacket", "jar", "jewelry box", "jewelry organizer", + "juicer", "jump rope", "kayak", "kettle", "key", + "key holder", "keyboard", "keychain", "keypad", "kitchen scale", + "kitchen 
timer", "kitchen towel", "knife", "knife block", "knife set", + "ladder", "lamp", "lamp shade", "laptop", "laptop bag", + "laptop case", "laptop stand", "laundry basket", "laundry detergent", "laundry hamper", + "lawn mower", "led light", "led strip", "letter opener", "level", + "license plate", "life jacket", "light", "light bulb", "light fixture", + "light switch", "lightbulb", "lighter", "lighthouse", "linen", + "lint roller", "lock", "locker", "loft bed", "luggage", + "luggage tag", "lunch box", "machine", "magazine rack", "magnet", + "magnifying glass", "mailbox", "makeup bag", "makeup brush", "makeup mirror", + "mannequin", "map", "marble", "marker", "mascara", + "mask", "mat", "match", "matchbox", "mattress", + "mattress pad", "mattress topper", "measuring cup", "measuring spoon", "meat thermometer", + "memory card", "memory foam pillow", "mesh", "microwave", "microwave cover", + "milk frother", "mirror", "mixer", "mobile phone", "monitor", + "monitor stand", "mop", "mop bucket", "mouse", "mouse pad", + "mousetrap", "mug", "multimeter", "music player", "nail", + "nail clipper", "nail file", "napkin", "napkin holder", "necklace", + "needle", "nesting table", "net", "network switch", "newspaper", + "night light", "nightstand", "notebook", "notepad", "nutcracker", + "office chair", "oil", "oil diffuser", "organizer", "ornament", + "outlet", "outlet cover", "oven", "oven mitt", "oven timer", + "overhead light", "pad", "paint", "paint brush", "paint roller", + "painting", "pan", "panel", "paper", "paper clip", + "paper shredder", "paper towel", "paper towel holder", "parking meter", "party favor", + "passport holder", "patio furniture", "pattern", "pc", "pedestal", + "pedestal fan", "pegboard", "pen", "pencil", "pencil case", + "pencil holder", "pencil sharpener", "pendant", "pendant light", "perfume", + "phone", "phone case", "phone charger", "phone holder", "phone stand", + "photo album", "photo frame", "picture", "picture frame", "pillow", + "pillow case", 
"pillow sham", "pin", "pipe", "pizza cutter", + "pizza pan", "pizza stone", "place mat", "planter", "plate", + "plate holder", "player", "pliers", "plug", "pocket knife", + "pod", "polish", "pool", "pool table", "popcorn maker", + "portable charger", "portable fan", "portable speaker", "poster", "pot", + "pot holder", "potato masher", "power bank", "power cord", "power strip", + "power supply", "printer", "projector", "projector screen", "purse", + "push pin", "puzzle", "quilt", "rack", "radio", + "rain gauge", "rake", "range hood", "razor", "reading light", + "recliner", "record player", "refrigerator", "remote", "remote control", + "resistance band", "rice cooker", "ring", "ring holder", "rivet", + "robot", "robot vacuum", "rocking chair", "roller", "rolling pin", + "room divider", "rope", "router", "rug", "ruler", + "safety pin", "salt shaker", "sander", "sandwich maker", "saucepan", + "scale", "scanner", "scarf", "scissors", "scoop", + "screw", "screwdriver", "scrub brush", "seal", "seating", + "security camera", "security system", "sensor", "serving tray", "sewing machine", + "shaver", "sheet", "shelf", "shelving unit", "shirt", + "shoe", "shoe organizer", "shoe rack", "shoelace", "shopping bag", + "shopping cart", "shower", "shower cap", "shower curtain", "shower head", + "shower rod", "shower stool", "shredder", "side table", "sieve", + "sink", "skateboard", "skewer", "ski", "skillet", + "sleep mask", "slipper", "slow cooker", "smart home device", "smart speaker", + "smart thermostat", "smartwatch", "smoke alarm", "smoke detector", "snap", + "snow shovel", "soap", "soap dispenser", "soap dish", "sofa", + "sofa bed", "sofa table", "solar panel", "solar power bank", "solar light", + "sound bar", "sound system", "spatula", "speaker", "spice rack", + "spoon", "spray bottle", "squeegee", "stain remover", "stand", + "stapler", "staples", "steamer", "step ladder", "step stool", + "stereo", "stethoscope", "stirrer", "stool", "storage", + "storage bin", "storage 
box", "storage container", "storage unit", "stove", + "strainer", "strap", "straw", "string", "string lights", + "stud finder", "suitcase", "sunglasses", "surge protector", "switch", + "table", "table cloth", "table lamp", "table runner", "tablet", + "tablet case", "tablet stand", "tack", "tag", "tape", + "tape measure", "tap", "target", "tarp", "task light", + "tea kettle", "tea set", "teapot", "teddy bear", "telephone", + "television", "television stand", "tennis racket", "tent", "thermometer", + "thermos", "thermostat", "thread", "throw", "throw pillow", + "tie", "tie rack", "timer", "tin", "tissue box", + "tissue holder", "toaster", "toaster oven", "toilet", "toilet brush", + "toilet paper", "toilet paper holder", "toilet seat", "tongs", "tool", + "tool box", "tool kit", "toothbrush", "toothbrush holder", "toothpaste", + "toothpick", "towel", "towel bar", "towel rack", "toy", + "toy box", "tracker", "tray", "treasure chest", "tree", + "trivet", "trophy", "trowel", "trunk", "tv", + "tv antenna", "tv mount", "tv remote", "tv stand", "umbrella", + "umbrella stand", "under bed storage", "usb cable", "usb drive", "usb hub", + "utensil", "utensil holder", "vacuum", "vacuum bag", "vacuum cleaner", + "vase", "vent", "video camera", "video game", "video player", + "vinyl player", "vr headset", "waffle maker", "wagon", "wall art", + "wall clock", "wall hook", "wall light", "wall mirror", "wall mount", + "wall outlet", "wall sconce", "wallet", "wardrobe", "washboard", + "washing machine", "waste basket", "watch", "watch band", "water bottle", + "water cooler", "water dispenser", "water filter", "water heater", "water pitcher", + "waterproof case", "webcam", "weighing scale", "wheel", "wheelbarrow", + "whisk", "whiteboard", "wick", "wifi extender", "wifi router", + "window", "window blind", "window curtain", "window screen", "window shade", + "wine bottle", "wine glass", "wine rack", "wire", "wireless charger", + "wireless earbuds", "wireless headphone", "wireless mouse", 
"wireless router", "wood", + "workbench", "wrench", "writing desk", "yoga block", "yoga mat", + "yoga strap", "zipper", "zone", }, "feature": { - "wireless", "smart", "eco-friendly", "advanced", "compact", - "high-performance", "energy-efficient", "portable", "durable", "stylish", - "touchscreen", "water-resistant", "noise-canceling", "voice-controlled", "ultra-lightweight", - "multi-functional", "user-friendly", "fast-charging", "biometric", "gps-enabled", + "3d capable", "4k ready", "5g compatible", "adjustable", "advanced", + "ai-powered", "anti-bacterial", "anti-fog", "anti-glare", "anti-slip", + "anti-static", "auto-adjusting", "auto-cleaning", "auto-focus", "auto-shutoff", + "automatic", "backlit", "battery-powered", "biometric", "bluetooth enabled", + "bluetooth ready", "built-in", "cable-free", "calibrated", "camera-enabled", + "certified", "child-safe", "cloud-connected", "color-changing", "compatible", + "compact", "connectable", "cordless", "customizable", "deep-cleaning", + "detachable", "digital", "dimming", "directional", "dishwasher-safe", + "dual-mode", "dual-purpose", "dust-proof", "easy-clean", "eco-friendly", + "edge-to-edge", "energy-efficient", "energy-saving", "enhanced", "ergonomic", + "expandable", "fast-charging", "fingerprint-resistant", "fire-resistant", "foldable", + "fragrance-free", "free-standing", "full-spectrum", "gps-enabled", "hands-free", + "hd ready", "heat-resistant", "high-capacity", "high-definition", "high-efficiency", + "high-performance", "high-speed", "high-tech", "holographic", "hybrid", + "impact-resistant", "indoor-outdoor", "infrared", "integrated", "interactive", + "ip-rated", "keyless", "led-backlit", "leak-proof", "long-lasting", + "low-energy", "low-maintenance", "magnetic", "maintenance-free", "memory-enabled", + "mesh-enabled", "microfiber", "microwave-safe", "motion-activated", "motion-sensing", + "multi-angle", "multi-color", "multi-functional", "multi-purpose", "multi-zone", + "network-ready", 
"noise-canceling", "noise-reducing", "non-stick", "odor-resistant", + "oil-resistant", "one-touch", "outdoor-rated", "overheat protection", "padded", + "palm-sized", "password-protected", "pet-friendly", "plug-and-play", "portable", + "precision", "premium", "pressure-sensitive", "programmable", "quick-release", + "quick-start", "radar-enabled", "rapid", "rechargeable", "remote-controlled", + "removable", "resistant", "retractable", "reusable", "rugged", + "rust-proof", "scratch-resistant", "self-adjusting", "self-cleaning", "self-contained", + "self-leveling", "sensor-enabled", "shock-absorbing", "shock-resistant", "smart", + "smartphone-compatible", "solar-powered", "sound-activated", "spill-proof", "splash-proof", + "stain-resistant", "steam-powered", "streaming-ready", "stylish", "submersible", + "tactile", "temperature-controlled", "tether-free", "time-delayed", "touch-activated", + "touchscreen", "ultra-compact", "ultra-fast", "ultra-lightweight", "ultra-quiet", + "ultra-slim", "ultrasonic", "underwater-capable", "upgradeable", "usb-powered", + "user-friendly", "ventilated", "vibration-resistant", "voice-activated", "voice-controlled", + "waterproof", "water-resistant", "weather-resistant", "weatherproof", "wi-fi enabled", + "wi-fi ready", "wireless", "wireless charging", "wrinkle-free", "zero-emission", }, "material": { - "titanium", "carbon", "alloy", "bamboo", "leather", - "glass", "ceramic", "aluminum", "stainless", "wood", - "plastic", "rubber", "silicon", "fabric", "paper", - "gold", "silver", "brass", "copper", "bronze", - "chrome", "marble", "granite", "porcelain", "plexiglass", - "quartz", "felt", "suede", + "abs plastic", "acrylic", "aluminum", "aluminum alloy", "aluminum oxide", + "ash wood", "bamboo", "bauxite", "beech wood", "birch wood", + "brass", "bronze", "burlap", "canvas", "carbon", + "carbon fiber", "carbon steel", "cardboard", "cashmere", "cast iron", + "cedar wood", "ceramic", "cherry wood", "chrome", "chromium", + "clay", "cobalt", 
"composite", "concrete", "copper", + "cork", "cotton", "denim", "diamond", "ebony wood", + "enamel", "epoxy", "fabric", "faux leather", "faux suede", + "felt", "fiberglass", "foam", "galvanized steel", "gel", + "glass", "gold", "granite", "graphite", "hardwood", + "hemp", "iron", "jade", "jute", "kapok", + "lacquer", "laminate", "latex", "lead", "leather", + "limestone", "linen", "magnesium", "mahogany wood", "marble", + "mdf", "mesh", "metal", "microfiber", "mica", + "mineral", "molybdenum", "neoprene", "nickel", "nylon", + "oak wood", "obsidian", "onyx", "paper", "pearl", + "pewter", "pine wood", "plastic", "plexiglass", "plywood", + "polycarbonate", "polyester", "polyethylene", "polypropylene", "polystyrene", + "polyurethane", "porcelain", "pottery", "quartz", "rattan", + "rawhide", "resin", "rosewood", "rubber", "sapphire", + "satin", "silicone", "silk", "silver", "slate", + "softwood", "spandex", "stainless steel", "steel", "stone", + "styrofoam", "suede", "synthetic", "teak wood", "terracotta", + "textile", "tin", "titanium", "titanium alloy", "tungsten", + "twill", "velvet", "vinyl", "walnut wood", "wicker", + "wood", "wool", "zinc", "zirconium", }, "suffix": { - "tech", "pro", "x", "plus", "elite", - "spark", "nexus", "nova", "fusion", "sync", - "edge", "boost", "max", "link", "prime", - "zoom", "pulse", "dash", "connect", "blaze", - "quantum", "spark", "vertex", "core", "flux", - "turbo", "shift", "wave", "matrix", + "ace", "active", "advanced", "air", "alpha", + "apex", "arc", "art", "aura", "axis", + "beast", "beta", "blaze", "boost", "brave", + "bright", "burst", "catalyst", "charge", "classic", + "connect", "core", "cosmic", "craft", "dash", + "delta", "design", "diamond", "digital", "drive", + "dynamic", "edge", "elite", "energy", "essence", + "evo", "ex", "excel", "express", "extreme", + "fire", "flex", "flow", "flux", "force", + "form", "forte", "fusion", "gen", "genesis", + "glide", "glow", "gold", "grand", "graph", + "gravity", "grid", "guard", 
"guide", "harmony", + "hero", "hub", "icon", "impact", "infinity", + "ion", "jet", "key", "kinetic", "lab", + "laser", "legend", "level", "light", "link", + "lite", "live", "logic", "loop", "lux", + "max", "matrix", "mega", "micro", "mind", + "mini", "mode", "motion", "move", "nexus", + "nova", "one", "optima", "orbit", "pace", + "peak", "phase", "pilot", "pixel", "plus", + "power", "premium", "prime", "pro", "pulse", + "quantum", "quest", "quick", "radiant", "rapid", + "ray", "rebel", "reign", "rise", "rush", + "scale", "scope", "sense", "shift", "signature", + "silver", "smart", "snap", "solo", "sonic", + "spark", "speed", "sphere", "spirit", "sport", + "star", "stream", "strike", "studio", "style", + "summit", "super", "surge", "sync", "tactical", + "tech", "terra", "thunder", "titan", "touch", + "tour", "trail", "trio", "trooper", "tropic", + "turbo", "ultra", "union", "unity", "vault", + "vector", "velocity", "vertex", "vibe", "vital", + "vortex", "wave", "x", "xenon", "zen", + "zero", "zone", "zoom", }, "benefit": { - "comfort", "efficiency", "safety", "reliability", - "versatility", "ease of use", "long battery life", - "precision", "enhanced connectivity", "portability", - "durability", "energy savings", "aesthetic appeal", - "health benefits", "convenience", "time-saving", - "high performance", "noise reduction", "user satisfaction", - "customizability", "sustainability", "cost-effectiveness", - "innovative features", "improved productivity", "enhanced experience", - "robust construction", "weather resistance", "minimal maintenance", - "increased functionality", "advanced technology", "ergonomic design", + "advanced technology", "aesthetic appeal", "better organization", "cost-effectiveness", + "comfort", "convenience", "customizability", "durability", "ease of use", + "efficiency", "energy savings", "enhanced connectivity", "enhanced experience", + "enhanced performance", "ergonomic design", "excellent value", "fast response time", + "flexibility", 
"health benefits", "high performance", "improved accuracy", + "improved comfort", "improved productivity", "improved quality", "increased functionality", + "increased lifespan", "innovative features", "long battery life", "minimal maintenance", + "noise reduction", "peace of mind", "portability", "precision", "reliability", + "robust construction", "safety", "seamless integration", "space efficiency", + "sustainability", "time-saving", "user satisfaction", "versatility", + "weather resistance", "wireless freedom", }, "use_case": { - "home", "office", "outdoors", "fitness", "travel", "gaming", - "cooking", "music", "learning", "entertainment", "professional work", - "healthcare", "educational purposes", "commuting", "camping", "hiking", - "sports", "art and craft", "gardening", "cleaning", "personal grooming", - "relaxation", "home security", "pet care", "smart automation", "food preparation", - "baking", "social gatherings", "productivity", "collaboration", "DIY projects", - "childcare", "remote work", "photography", "videography", "wellness routines", + "art and craft", "baking", "barbecuing", "bathroom", "bedroom", + "camping", "childcare", "cleaning", "collaboration", "commuting", + "cooking", "creative projects", "dining", "DIY projects", "educational purposes", + "entertainment", "exercise", "fitness", "food preparation", "gardening", + "gaming", "garage", "healthcare", "hiking", "home", + "home automation", "home security", "indoor activities", "kitchen", "laundry", + "learning", "living room", "music", "office", "outdoor activities", + "outdoors", "personal grooming", "pet care", "photography", "productivity", + "professional work", "remote work", "relaxation", "smart automation", "social gatherings", + "spaces", "sports", "storage", "travel", "videography", + "wellness routines", "workshop", }, "target_audience": { - "children", "adults", "seniors", "students", "professionals", "athletes", - "travelers", "families", "pet owners", "homeowners", "gamers", 
"cooks", "DIY enthusiasts", - "musicians", "artists", + "adults", "artists", "athletes", "beginners", "children", + "collectors", "cooks", "creative professionals", "DIY enthusiasts", "educators", + "families", "fitness enthusiasts", "gamers", "health-conscious individuals", "homeowners", + "hobbyists", "musicians", "outdoor enthusiasts", "parents", "pet owners", + "photographers", "professionals", "seniors", "small businesses", "students", + "tech enthusiasts", "travelers", "wellness seekers", "working professionals", }, "dimension": { - "small", "medium", "large", "extra-large", "compact", "lightweight", - "heavy", "mini", "standard", "oversized", + "compact", "extra-large", "extra-small", "full-size", "heavy", + "jumbo", "large", "lightweight", "medium", "mini", + "oversized", "petite", "portable", "regular", "small", + "standard", "travel-size", "x-large", "x-small", "xx-large", + "xx-small", }, "description": { diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/sentence.go b/vendor/github.com/brianvoe/gofakeit/v7/data/sentence.go deleted file mode 100644 index e12319d8..00000000 --- a/vendor/github.com/brianvoe/gofakeit/v7/data/sentence.go +++ /dev/null @@ -1,5 +0,0 @@ -package data - -var Sentence = map[string][]string{ - "phrase": {"what's yer poison", "time will tell", "I'm good", "nice to meet you", "spring forward, fall back", "what's your job", "once or twice", "you could have fooled me", "what's your name", "why not Zoidberg", "time you got a watch", "I'm Hindu", "fair play", "what's your phone number", "after the jump", "cease fire", "as ever", "I'm hot", "best of", "get well soon", "what's your poison", "when is closing time", "yes and amen", "you don't dip your pen in the company inkwell", "I'm hungry", "short of", "what's yours", "duces tecum", "after you", "yes and no", "I'm in love with you", "the pants off", "I'm Jewish", "few sandwiches short of a picnic", "shut the front door", "does a bear shit in the woods", "the party is over", 
"tomayto tomahto", "I'm looking for a grocery store", "does anyone here speak English", "heads I win, tails you lose", "I'm looking for a job", "stick a fork in it", "the penny drops", "I'm lost", "shut up and take my money", "mind you", "I'm married", "isn't it so", "wham-bam-thank-you-ma'am", "does not compute", "hold your fire", "pardon me", "mind your own beeswax", "I'm mute", "does someone look like", "I'm not being funny", "leave me alone", "going once, going twice, sold", "you get that", "I'm not interested", "talk about", "here be dragons", "always a bridesmaid, never a bride", "the plot thickens", "close, but no cigar", "I'm not religious", "ultra vires", "bound to", "always the bridesmaid, never the bride", "the plural of anecdote is not data", "I'm pregnant", "comedy equals tragedy plus time", "get you", "heads will roll", "all to the better", "I'm rubber, you're glue", "going to", "when push comes to shove", "you had to be there", "I'm scared", "you have beautiful eyes", "enjoy your meal", "I'm sick", "doesn't have both oars in the water", "you have the advantage of me", "here lies", "check is in the mail", "I'm single", "stick 'em up", "when the chips are down", "you just had to", "that'll be the day", "I'm sorry", "very good", "lather, rinse, repeat", "you kiss your mother with that mouth", "that'll do", "the rabbit died", "I'm straight", "in order for", "when the going gets weird, the weird turn pro", "I'm thirsty", "the rest is history", "it depends", "I'm tired", "in order to", "monkeys might fly out of my butt", "oh my life", "do want", "would it hurt", "you know what", "here you are", "all wool and a yard wide", "hit it", "pound for pound", "bottom falls out", "OK yah", "would it kill someone", "you know what I mean", "here you go", "alone in a crowd", "me neither", "chin up", "to be continued", "I'm twenty years old", "such is life", "off with someone's head", "Lord knows", "case closed", "you know what they say", "you've got to laugh", "ten 
points to Gryffindor", "that's a relief", "I'm worried", "kill the rabbit", "live and learn", "would not throw someone out of bed", "catch you later", "that's a wrap", "the rubber meets the road", "to be honest", "I'm your huckleberry", "off with their head", "you learn something new every day", "catch you on the flip side", "all your base are belong to us", "that's all", "horses for courses", "to be named later", "good night", "would you mind putting on your seat belt", "easy does it", "that's all she wrote", "me too", "oh noes", "that's for me to know and you to find out", "to be truthful", "still got one's communion money", "do you accept American dollars", "winner, winner, chicken dinner", "workers of the world, unite", "speak of the devil", "you must be fun at parties", "that's it", "hit me", "how about that", "ding, ding, ding, we have a winner", "do you accept credit cards", "word has it", "woulda, coulda, shoulda", "you must be new here", "how are you", "do you believe in God", "woulda, shoulda, coulda", "that's life", "safety in numbers", "how are you doing", "do you come here often", "worm has turned", "you never know", "that's my", "how are you getting along", "leave well enough alone", "do you have a boyfriend", "that's saying something", "the shoe is on the other foot", "this is someone", "do you have a girlfriend", "Lord only knows", "that's that", "check yourself before you wreck yourself", "this is the life", "how can you sleep at night", "wake up and die right", "do you have a menu in English", "that's the bunny", "the show must go on", "this is where we came in", "nod's as good as a wink to a blind bat", "wake up and smell the ashes", "on the huh", "do you have any brothers or sisters", "dogs bark", "worm turns", "that's the spirit", "this just in", "how did he die", "more like", "do you have any pets", "alright me babber", "Elvis has left the building", "this means war", "how do", "she could be his mother", "do you have children", "alright me 
lover", "that's the ticket", "how do I get to", "shoulda, coulda, woulda", "nome sane", "guess what", "whenever one turns around", "do you have Wi-Fi", "alright my babber", "the story goes", "how do I get to the airport", "shoulda, woulda, coulda", "do you kiss your mother with that mouth", "Lord willing and the creek don't rise", "you said it", "alright my lover", "how do I get to the bus station", "ask me one on sport", "need I say more", "sounds like a plan", "put that in your pipe and smoke it", "do you know", "take a picture, it will last longer", "the streets are paved with gold", "how do I get to the train station", "ask my arse", "stop the car", "do you know who I am", "wouldn't you know", "you shouldn't have", "how do ye do", "fans are slans", "use one's coconut", "bit by a barn mouse", "stick that in your pipe and smoke it", "do you mind", "but for the grace of God", "wouldn't you know it", "head in the sand", "the terrorists will have won", "how do you do", "please excuse my dear Aunt Sally", "much of a muchness", "bless someone's cotton socks", "do you need help", "or else", "dress for the slide, not the ride", "that's wassup", "the thick plottens", "much to be said", "bless someone's heart", "a blessing and a curse", "do you speak English", "you think", "that's what I'm talking about", "how do you like that", "art imitates life", "please help me", "five will get you ten", "do you think you can walk", "or so", "that's what she said", "the thing is", "how do you like them apples", "please pass the salt", "I've been robbed", "nature calls", "a boon and a bane", "but me no buts", "or something", "you welcome", "that's what's up", "how do you pronounce this word", "fare thee well", "please repeat after me", "I've been shot", "pot, meet kettle", "a boon or a bane", "where are the snows of yesteryear", "or what", "rolling in it", "the toilet is clogged", "how do you say...in English", "circle gets the square", "more than someone has had hot dinners", "please 
say that again", "I've burned myself", "different strokes", "where are the toilets", "or words to that effect", "you win", "how do you spell this word", "to hell with", "in virtue of which", "please sit down", "where are we", "out to", "am I right", "please speak more slowly", "I've lost my keys", "where are we going", "but who's counting", "you wish", "am I right or am I right", "how goes it", "methinks the lady doth protest too much", "please turn left", "could be written on the back of a postage stamp", "I've never heard it called that before", "where are you", "you wish, jellyfish", "am I under arrest", "methinks thou dost protest too much", "please turn right", "bang to rights", "gimme a break", "where are you from", "revenge is sweet", "'tis the season", "pull the other one", "where are your parents", "out with it", "have a good one", "how long is a piece of string", "ay up me duck", "before you can say Jack Robinson", "pull the other one, it's got bells on", "where away", "only time will tell", "could fit on the back of a postage stamp", "before you can say knife", "pull the other one, it's got brass bells on", "where can I find a hotel", "the wheels came off", "angel passes", "how many languages do you speak", "could go all day", "sleep tight", "nature vs nurture", "practice, practice, practice", "where do I sign up", "help is on the way", "many thanks", "the wheels came off the bus", "mercy bucket", "how many siblings do you have", "pleased to meet you", "could have fooled me", "where do you live", "the wheels came off the wagon", "mercy buckets", "where do you live at", "you'd better believe it", "than a bygod", "the wheels fell off", "could have, would have, should have", "where does it hurt", "hell if I know", "you'd complain if you were hung with a new rope", "the wheels fell off the bus", "every good boy deserves fudge", "could I see the menu, please", "where does this bus go", "help wanted", "the wheels fell off the wagon", "how much do I owe you", 
"where does this train go", "how much do you charge", "steady as she goes", "put the same shoe on every foot", "where have you been", "temper temper", "how much does it cost", "coulda, shoulda, woulda", "give credit where credit is due", "boom goes the dynamite", "where is the toilet", "how much is it", "in your dreams", "coulda, woulda, shoulda", "what a lovely day", "to save one's life", "exsqueeze me", "like a martin to his gourd", "what a pity", "you'll be late for your own funeral", "every man for himself", "size matters"}, -} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/song.go b/vendor/github.com/brianvoe/gofakeit/v7/data/song.go new file mode 100644 index 00000000..0e275c8e --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/song.go @@ -0,0 +1,246 @@ +package data + +// Songs: Year-end Charts Hot 100 Songs from Billboard +// Source: https://www.billboard.com/charts/year-end/hot-100-songs/ + +// Artists: Greatest of All Time Artists based on Billboard rate +// Source: https://www.billboard.com/charts/greatest-of-all-time-artists/ + +var Songs = map[string][]string{ + "name": { + "A Bar Song (Tipsy)", + "A Holly Jolly Christmas", + "Act II: Date @ 8", + "Agora Hills", + "Ain't No Love In Oklahoma", + "All I Want For Christmas Is You", + "Austin", + "Beautiful Things", + "Birds Of A Feather", + "Bulletproof", + "Burn It Down", + "Carnival", + "Cowgirls", + "Cruel Summer", + "Dance The Night", + "Die With A Smile", + "Down Bad", + "End Of Beginning", + "Espresso", + "Euphoria", + "Everybody", + "Exes", + "FE!N", + "FTCU", + "Fast Car", + "Feather", + "First Person Shooter", + "Flowers", + "Fortnight", + "Fukumean", + "Gata Only", + "Get It Sexyy", + "Good Good", + "Good Luck, Babe!", + "Greedy", + "High Road", + "Hot To Go!", + "Houdini", + "Houdini", + "I Am Not Okay", + "I Can Do It With A Broken Heart", + "I Had Some Help", + "I Like The Way You Kiss Me", + "I Remember Everything", + "IDGAF", + "Is It Over Now?", + "Jingle Bell Rock", 
+ "La Diabla", + "Last Christmas", + "Last Night", + "Lies Lies Lies", + "Like That", + "Lil Boo Thang", + "Lose Control", + "Lovin On Me", + "Lunch", + "Made For Me", + "Miles On It", + "Million Dollar Baby", + "Monaco", + "Need A Favor", + "Never Lose Me", + "Not Like Us", + "On My Mama", + "Paint The Town Red", + "Pink Skies", + "Please Please Please", + "Pour Me A Drink", + "Pretty Little Poison", + "Redrum", + "Rich Baby Daddy", + "Rockin' Around The Christmas Tree", + "Saturn", + "Save Me", + "Slow It Down", + "Snooze", + "Stargazing", + "Stick Season", + "Taste", + "Texas Hold 'Em", + "The Painter", + "Thinkin' Bout Me", + "Too Sweet", + "Truck Bed", + "Type Shit", + "Vampire", + "Wanna Be", + "Water", + "We Can't Be Friends (Wait For Your Love)", + "What Was I Made For?", + "Whatever She Wants", + "Where It Ends", + "Where The Wild Things Are", + "White Horse", + "Wild Ones", + "Wildflower", + "Wind Up Missin' You", + "World On Fire", + "Yeah Glo!", + "Yes, And?", + }, + "artist": { + "Adele", + "Aerosmith", + "Alicia Keys", + "Aretha Franklin", + "Barbra Streisand", + "Barry Manilow", + "Bee Gees", + "Beyonce", + "Billy Joel", + "Bob Dylan", + "Bob Seger", + "Bon Jovi", + "Boyz II Men", + "Britney Spears", + "Bruce Springsteen & The E Street Band", + "Bruno Mars", + "Bryan Adams", + "Carole King", + "Carpenters", + "Celine Dion", + "Chicago", + "Chris Brown", + "Commodores", + "Creedence Clearwater Revival", + "Daryl Hall John Oates", + "Def Leppard", + "Diana Ross", + "Donna Summer", + "Drake", + "Eagles", + "Earth, Wind & Fire", + "Ed Sheeran", + "Elton John", + "Elvis Presley", + "Eminem", + "Eric Clapton", + "Fleetwood Mac", + "Foreigner", + "Garth Brooks", + "Guns N' Roses", + "Heart", + "Herb Alpert", + "Huey Lewis & The News", + "JAY-Z", + "James Taylor", + "Janet Jackson", + "John Denver", + "John Mellencamp", + "Journey", + "Justin Bieber", + "Justin Timberlake", + "Kanye West", + "Katy Perry", + "Kelly Clarkson", + "Kenny Rogers", + "Lady Gaga", 
+ "Led Zeppelin", + "Linda Ronstadt", + "Linkin Park", + "Lionel Richie", + "Madonna", + "Mariah Carey", + "Maroon 5", + "Marvin Gaye", + "Mary J. Blige", + "Michael Bolton", + "Michael Jackson", + "Miley Cyrus", + "Neil Diamond", + "Nelly", + "Nickelback", + "Olivia Newton-John", + "P!nk", + "Paul McCartney", + "Paula Abdul", + "Phil Collins", + "Pink Floyd", + "Prince", + "Queen", + "R. Kelly", + "Rihanna", + "Rod Stewart", + "Santana", + "Simon & Garfunkel", + "Stevie Wonder", + "Taylor Swift", + "The Beach Boys", + "The Beatles", + "The Black Eyed Peas", + "The Jacksons", + "The Monkees", + "The Rolling Stones", + "The Supremes", + "The Temptations", + "Three Dog Night", + "Tim McGraw", + "U2", + "Usher", + "Van Halen", + "Whitney Houston", + }, + "genre": { + "Acoustic Pop", + "Alternative Hip-Hop", + "Alternative Pop", + "Chillwave", + "Contemporary R&B", + "Country", + "Dancehall", + "Electro-pop", + "Electronic Dance Music (EDM)", + "Emo Rap", + "Funk", + "Gospel-inspired Pop", + "Hip-Hop", + "Indie Pop", + "Latin Pop", + "Lo-fi Hip-Hop", + "Melodic Rap", + "Pop", + "Pop Punk", + "Pop Rock", + "R&B", + "Rap", + "Reggaeton", + "Rock", + "Singer-Songwriter", + "Soul", + "Synthwave", + "Trap", + "Trap Soul", + "Urban Contemporary", + }, +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/data/text.go b/vendor/github.com/brianvoe/gofakeit/v7/data/text.go new file mode 100644 index 00000000..d1de2260 --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/data/text.go @@ -0,0 +1,523 @@ +package data + +var Text = map[string][]string{ + "sentence": { + // Imperatives mixing verbs/adverbs/adjectives + "Choose {adjectivedescriptive} defaults.", + "Deliberately {verbtransitive} the {nouncommon}.", + "Carefully {verbtransitive} the {nounconcrete} {adverbmanner}.", + "Explicitly name the {nouncommon} before you {verbtransitive} it.", + "Ruthlessly remove dead {nouncommon}.", + "Quietly harden the {nouncommon} {adverbtimeindefinite}.", + "Systematically improve 
the {nouncommon} {adverbfrequencydefinite}.", + "Continuously measure the {nouncommon} and {verbtransitive} the outliers.", + "Optimize {nouncommon} for {adjectivedescriptive} clarity.", + "Protect the {nouncommon} under {adjectivedescriptive} load.", + + // Descriptive statements using linking/helping verbs + adjectives/adverbs + "The {nouncommon} {verblinking} {adjectivedescriptive}.", + "The {adjectivedescriptive} {nouncommon} {verblinking} unexpectedly {adverbmanner}.", + "The {nounabstract} {verbhelping} be {adjectivedescriptive} {adverbdegree}.", + "Clear {nouncommon} {verblinking} better than clever {nouncommon}.", + "Consistent {nouncommon} {verblinking} the foundation of {nounabstract}.", + "Balanced {nouncommon} and {nouncommon} {verblinking} essential.", + "Subtle {color} accents {verblinking} effective {adverbdegree}.", + "Durable {nouncommon} {verblinking} worth the {nounabstract}.", + "Steady {adjectivedescriptive} progress in {city} {verblinking} visible.", + "Defaults at {company} {verblinking} shape {nouncommon}.", + + // Prepositional / connective openers (time, comparison, listing, casual) + "{connectivetime}, {pronounpersonal} {verbaction} {adverbmanner}.", + "{connectivecomparative}, {pronounpersonal} {verbtransitive} the {nouncommon}.", + "{connectivelisting}, document the {nouncommon} and {verbtransitive} the rest.", + "{connectivecasual}, keep the {nouncommon} simple.", + "{connectiveexamplify}, prefer {nouncommon} over {nouncommon}.", + "{prepositionsimple} the {nounconcrete}, {pronounpersonal} {verbaction} {adverbmanner}.", + "{prepositioncompound} the {nouncommon}, we {verbtransitive} a smaller {nouncommon}.", + "{prepositiondouble} the {nouncommon}, align expectations.", + "{connectivetime} the review, {pronounpersonal} {verbtransitive} the {nouncommon}.", + "{connectivecomparative} the prior approach, this {nouncommon} {verblinking} clearer.", + + // Pronoun-focused directions + "{pronounpersonal} {verbaction} {adverbmanner} to 
stabilize the {nouncommon}.", + "{pronoundemonstrative} {nouncommon} {verblinking} {adjectivedescriptive}; {pronounpersonal} {verbtransitive} it next.", + "{pronounindefinite} {verbintransitive} when the {nouncommon} spikes.", + "{pronounrelative} {verbaction} quickly {verbtransitive} the {nouncommon}.", + "{pronounpossessive} {nouncommon} {verblinking} ready for {nounabstract}.", + + // Narrative / progression with time & frequency adverbs + "After {number:2,12} iterations, {pronounpersonal} {verbtransitive} the {nouncommon} {adverbmanner}.", + "Before launch, {pronounpersonal} {verbaction} {adverbtimedefinite}.", + "Eventually, the {nouncommon} {verbintransitive} {adverbmanner}.", + "Sometimes the {nouncommon} {verbintransitive} {adverbfrequencyindefinite}.", + "Weekly, {pronounpersonal} {verbtransitive} the {nouncommon} with {nouncommon}.", + + // Product / UX flavored with adjectives & prepositions + "Guide {nouncommon} with {adjectivedescriptive} affordances.", + "Reduce cognitive load in the {nouncommon}.", + "Raise contrast where the {nouncommon} hides.", + "Align {nouncommon} with user intent.", + "Scope the {nouncommon} to fit the moment.", + "Map the happy path through {nouncommon}.", + "Remove first friction from the {nouncommon}.", + "Balance {productfeature} with {nouncommon}.", + "Prefer predictable {nouncommon} over surprising {nouncommon}.", + "Compose {nouncommon} from simple parts.", + + // Data / measurement with adverbs and verbs + "Establish a baseline for {nouncommon}.", + "Set a realistic target for {nouncommon}.", + "Track {nouncommon} over time {adverbfrequencydefinite}.", + "Alert on {nouncommon} thresholds {adverbtimedefinite}.", + "Attribute gains to {nouncommon} where possible.", + "Sample {nouncommon} at {number:1,60}s intervals.", + "Compare {nouncommon} before and after you {verbtransitive}.", + "Visualize {nouncommon} for faster decisions.", + "Archive stale {nouncommon} responsibly.", + "Instrument the {nouncommon} for 
observability.", + + // Reliability / ops with adverbs & prepositions + "Design for failure and graceful {nouncommon}.", + "Create a fallback for {nouncommon}.", + "Rate-limit {nouncommon} by default.", + "Defer {nouncommon} during peak load.", + "Decompose {nouncommon} into smaller {nouncommon}.", + "Guard {nouncommon} with sensible limits.", + "Budget latency for {nouncommon}.", + "Stage {nouncommon} behind feature flags.", + "Automate {nouncommon} recovery {adverbmanner}.", + "Practice {nouncommon} drills regularly.", + + // Collaboration / communication using connectives & pronouns + "Write the one-sentence summary for the {nouncommon}.", + "Share the decision record for the {nouncommon}.", + "Draw a diagram for the {nouncommon} {adverbmanner}.", + "Clarify ownership of the {nouncommon} {adverbtimeindefinite}.", + "Surface risks around the {nouncommon} {adverbdegree}.", + "State assumptions behind the {nouncommon}.", + "Invite review for the {nouncommon} in {city}.", + "Close the loop on the {nouncommon}.", + "Publish a changelog entry for the {nouncommon}.", + "Celebrate wins tied to the {nouncommon}.", + + // Place / time flavor with prepositions and adverbs + "Mornings in {city} favor {nouncommon}.", + "Evenings in {city} invite quieter {nouncommon}.", + "Weekends reserve time for {hobby} and {nouncommon}.", + "Launch the {nouncommon} midweek for clarity.", + "Review the {nouncommon} every {number:1,4} weeks.", + "Retire outdated {nouncommon} each quarter.", + "Short feedback loops {verbaction} {nouncommon} {adverbmanner}.", + "Warm starts beat cold {nouncommon}.", + + // Emphatic / interjectional + "{interjection}! Ship the {nouncommon} now!", + "{interjection}! 
Great progress on {nouncommon}!", + "Onward to better {nouncommon}!", + "Mind the {nouncommon}, then celebrate!", + }, + "phrase": {"what's yer poison", "time will tell", "I'm good", "nice to meet you", "spring forward, fall back", "what's your job", "once or twice", "you could have fooled me", "what's your name", "why not Zoidberg", "time you got a watch", "I'm Hindu", "fair play", "what's your phone number", "after the jump", "cease fire", "as ever", "I'm hot", "best of", "get well soon", "what's your poison", "when is closing time", "yes and amen", "you don't dip your pen in the company inkwell", "I'm hungry", "short of", "what's yours", "duces tecum", "after you", "yes and no", "I'm in love with you", "the pants off", "I'm Jewish", "few sandwiches short of a picnic", "shut the front door", "does a bear shit in the woods", "the party is over", "tomayto tomahto", "I'm looking for a grocery store", "does anyone here speak English", "heads I win, tails you lose", "I'm looking for a job", "stick a fork in it", "the penny drops", "I'm lost", "shut up and take my money", "mind you", "I'm married", "isn't it so", "wham-bam-thank-you-ma'am", "does not compute", "hold your fire", "pardon me", "mind your own beeswax", "I'm mute", "does someone look like", "I'm not being funny", "leave me alone", "going once, going twice, sold", "you get that", "I'm not interested", "talk about", "here be dragons", "always a bridesmaid, never a bride", "the plot thickens", "close, but no cigar", "I'm not religious", "ultra vires", "bound to", "always the bridesmaid, never the bride", "the plural of anecdote is not data", "I'm pregnant", "comedy equals tragedy plus time", "get you", "heads will roll", "all to the better", "I'm rubber, you're glue", "going to", "when push comes to shove", "you had to be there", "I'm scared", "you have beautiful eyes", "enjoy your meal", "I'm sick", "doesn't have both oars in the water", "you have the advantage of me", "here lies", "check is in the mail", 
"I'm single", "stick 'em up", "when the chips are down", "you just had to", "that'll be the day", "I'm sorry", "very good", "lather, rinse, repeat", "you kiss your mother with that mouth", "that'll do", "the rabbit died", "I'm straight", "in order for", "when the going gets weird, the weird turn pro", "I'm thirsty", "the rest is history", "it depends", "I'm tired", "in order to", "monkeys might fly out of my butt", "oh my life", "do want", "would it hurt", "you know what", "here you are", "all wool and a yard wide", "hit it", "pound for pound", "bottom falls out", "OK yah", "would it kill someone", "you know what I mean", "here you go", "alone in a crowd", "me neither", "chin up", "to be continued", "I'm twenty years old", "such is life", "off with someone's head", "Lord knows", "case closed", "you know what they say", "you've got to laugh", "ten points to Gryffindor", "that's a relief", "I'm worried", "kill the rabbit", "live and learn", "would not throw someone out of bed", "catch you later", "that's a wrap", "the rubber meets the road", "to be honest", "I'm your huckleberry", "off with their head", "you learn something new every day", "catch you on the flip side", "all your base are belong to us", "that's all", "horses for courses", "to be named later", "good night", "would you mind putting on your seat belt", "easy does it", "that's all she wrote", "me too", "oh noes", "that's for me to know and you to find out", "to be truthful", "still got one's communion money", "do you accept American dollars", "winner, winner, chicken dinner", "workers of the world, unite", "speak of the devil", "you must be fun at parties", "that's it", "hit me", "how about that", "ding, ding, ding, we have a winner", "do you accept credit cards", "word has it", "woulda, coulda, shoulda", "you must be new here", "how are you", "do you believe in God", "woulda, shoulda, coulda", "that's life", "safety in numbers", "how are you doing", "do you come here often", "worm has turned", "you never 
know", "that's my", "how are you getting along", "leave well enough alone", "do you have a boyfriend", "that's saying something", "the shoe is on the other foot", "this is someone", "do you have a girlfriend", "Lord only knows", "that's that", "check yourself before you wreck yourself", "this is the life", "how can you sleep at night", "wake up and die right", "do you have a menu in English", "that's the bunny", "the show must go on", "this is where we came in", "nod's as good as a wink to a blind bat", "wake up and smell the ashes", "on the huh", "do you have any brothers or sisters", "dogs bark", "worm turns", "that's the spirit", "this just in", "how did he die", "more like", "do you have any pets", "alright me babber", "Elvis has left the building", "this means war", "how do", "she could be his mother", "do you have children", "alright me lover", "that's the ticket", "how do I get to", "shoulda, coulda, woulda", "nome sane", "guess what", "whenever one turns around", "do you have Wi-Fi", "alright my babber", "the story goes", "how do I get to the airport", "shoulda, woulda, coulda", "do you kiss your mother with that mouth", "Lord willing and the creek don't rise", "you said it", "alright my lover", "how do I get to the bus station", "ask me one on sport", "need I say more", "sounds like a plan", "put that in your pipe and smoke it", "do you know", "take a picture, it will last longer", "the streets are paved with gold", "how do I get to the train station", "ask my arse", "stop the car", "do you know who I am", "wouldn't you know", "you shouldn't have", "how do ye do", "fans are slans", "use one's coconut", "bit by a barn mouse", "stick that in your pipe and smoke it", "do you mind", "but for the grace of God", "wouldn't you know it", "head in the sand", "the terrorists will have won", "how do you do", "please excuse my dear Aunt Sally", "much of a muchness", "bless someone's cotton socks", "do you need help", "or else", "dress for the slide, not the ride", 
"that's wassup", "the thick plottens", "much to be said", "bless someone's heart", "a blessing and a curse", "do you speak English", "you think", "that's what I'm talking about", "how do you like that", "art imitates life", "please help me", "five will get you ten", "do you think you can walk", "or so", "that's what she said", "the thing is", "how do you like them apples", "please pass the salt", "I've been robbed", "nature calls", "a boon and a bane", "but me no buts", "or something", "you welcome", "that's what's up", "how do you pronounce this word", "fare thee well", "please repeat after me", "I've been shot", "pot, meet kettle", "a boon or a bane", "where are the snows of yesteryear", "or what", "rolling in it", "the toilet is clogged", "how do you say...in English", "circle gets the square", "more than someone has had hot dinners", "please say that again", "I've burned myself", "different strokes", "where are the toilets", "or words to that effect", "you win", "how do you spell this word", "to hell with", "in virtue of which", "please sit down", "where are we", "out to", "am I right", "please speak more slowly", "I've lost my keys", "where are we going", "but who's counting", "you wish", "am I right or am I right", "how goes it", "methinks the lady doth protest too much", "please turn left", "could be written on the back of a postage stamp", "I've never heard it called that before", "where are you", "you wish, jellyfish", "am I under arrest", "methinks thou dost protest too much", "please turn right", "bang to rights", "gimme a break", "where are you from", "revenge is sweet", "'tis the season", "pull the other one", "where are your parents", "out with it", "have a good one", "how long is a piece of string", "ay up me duck", "before you can say Jack Robinson", "pull the other one, it's got bells on", "where away", "only time will tell", "could fit on the back of a postage stamp", "before you can say knife", "pull the other one, it's got brass bells on", 
"where can I find a hotel", "the wheels came off", "angel passes", "how many languages do you speak", "could go all day", "sleep tight", "nature vs nurture", "practice, practice, practice", "where do I sign up", "help is on the way", "many thanks", "the wheels came off the bus", "mercy bucket", "how many siblings do you have", "pleased to meet you", "could have fooled me", "where do you live", "the wheels came off the wagon", "mercy buckets", "where do you live at", "you'd better believe it", "than a bygod", "the wheels fell off", "could have, would have, should have", "where does it hurt", "hell if I know", "you'd complain if you were hung with a new rope", "the wheels fell off the bus", "every good boy deserves fudge", "could I see the menu, please", "where does this bus go", "help wanted", "the wheels fell off the wagon", "how much do I owe you", "where does this train go", "how much do you charge", "steady as she goes", "put the same shoe on every foot", "where have you been", "temper temper", "how much does it cost", "coulda, shoulda, woulda", "give credit where credit is due", "boom goes the dynamite", "where is the toilet", "how much is it", "in your dreams", "coulda, woulda, shoulda", "what a lovely day", "to save one's life", "exsqueeze me", "like a martin to his gourd", "what a pity", "you'll be late for your own funeral", "every man for himself", "size matters"}, + "comment": { + // Quick reactions + "{interjection}", + "{interjection}! nice", + "wow {adjective}", + "solid", + "good call", + "love it", + "clean move", + "tight work", + "so smooth", + "neat", + + // Positive feedback + "{interjection}, this {noun} is {adjective}", + "really like the {adjective} {noun}", + "love the {noun} vibe", + "nice {noun}, great {adjective} touch", + "{interjection}! 
{adjective} {noun} all around", + "great {noun} structure", + "smooth {noun} flow", + "balanced {noun} and {noun}", + "that {noun} is {adjective}", + "good {noun} execution", + + // Questions / curiosity + "does {noun} need to {verb}", + "why the {adjective} {noun}", + "could we {verb} the {noun} a bit", + "any reason {noun} should {verb}", + "how does this {noun} {verb}", + "what if we {verb} the {noun}", + "would a {adjective} {noun} help", + "where does the {noun} go", + "is {noun} supposed to {verb}", + "does this {noun} scale", + + // Suggestions + "maybe {verb} the {noun} for {noun}", + "try {verb} with {noun}", + "let’s {verb} the {noun} and see", + "consider {adjective} {noun} over {noun}", + "lean into the {adjective} {noun}", + "pull back on the {adjective} {noun}", + "swap {noun} for {adjective} {noun}", + "add some {adjective} {noun}", + "replace {noun} with {noun}", + "simplify the {noun}", + + // Trade-off / compare + "this favors {noun} over {noun}", + "strong {adjective} {noun}, weaker {noun}", + "more {noun}, less {noun} might work", + "balance {noun} with {adjective} {noun}", + "{interjection} — nice {noun}, watch the {noun}", + "trades {noun} for {adjective} {noun}", + "shift from {noun} toward {noun}", + "prioritize {noun} before {noun}", + "watch {noun} vs {noun}", + "weights {noun} higher than {noun}", + + // Clarity / critique + "the {noun} feels {adjective}", + "scope the {noun} tighter", + "the {noun} goal is unclear", + "the {noun} could be simpler", + "the {noun} reads {adjective}", + "unclear {noun} boundaries", + "hard to follow {noun}", + "the {noun} seems overloaded", + "maybe lighten the {noun}", + "focus the {noun}", + + // Performance / reliability + "{noun} looks {adjective} under load", + "{noun} might {verb} under stress", + "watch {noun} when we {verb}", + "{interjection}, {noun} spikes when {verb}", + "handle {noun} if it {verb}s", + "the {noun} might break when {verb}", + "test {noun} under {adjective} load", + "safe {noun}, 
careful with {noun}", + "optimize {noun} when {verb}", + "guard {noun} against {verb}", + + // Style / tone + "consistent {noun}, good rhythm", + "the {adjective} {noun} sets the tone", + "{noun} and {noun} land well", + "keep the {noun} crisp", + "the {noun} feels polished", + "love the {adjective} {noun} energy", + "stylish {noun} layout", + "sharp {noun} edges", + "smooth {noun} finish", + "elegant {noun} use", + + // Narrative / longer thoughts + "after seeing this {noun}, maybe {verb} the {noun} and adjust the {adjective} {noun}", + "the {noun} could benefit from {verb} before adding more {adjective} {noun}", + "if we {verb} the {noun} first, the {noun} might feel more {adjective}", + "really liking how the {adjective} {noun} ties to the {noun}", + "feels like {noun} wants to {verb} before we add {noun}", + "this {noun} sets a strong base, but {noun} could still {verb}", + "we might {verb} the {noun} later if {noun} grows", + "the {noun} works well; curious if {noun} should {verb} next", + "could chain {verb} {noun} after {noun} for better flow", + "maybe {verb} {noun} once {noun} is {adjective}", + }, + "quote": { + // Short aphorisms + `"less {noun}, more {noun}"`, + `"make the {noun} simple"`, + `"clarity over {noun}"`, + `"ship the {noun}"`, + `"trust the {adjective} {noun}"`, + `"move with {adjective} {noun}"`, + `"small {noun}, big impact"`, + `"choose useful {noun}"`, + `"iterate the {noun}"`, + `"defaults are decisions"`, + + // Imperatives + `"keep the {noun} crisp"`, + `"name the {noun}, then {verb}"`, + `"measure the {noun} you {verb}"`, + `"start with {noun}, end with {noun}"`, + `"protect the {noun} under stress"`, + `"pair {noun} with {adjective} {noun}"`, + `"reduce the {noun} until it hurts, then stop"`, + `"focus on {productfeature}, trim the rest"`, + `"pick the {adjective} path and commit"`, + `"make {noun} boring and {noun} remarkable"`, + + // Reflective + `"in {city}, the {noun} you {verb} becomes your {noun}"`, + `"after {number:3,12} 
attempts, the {noun} finally felt {adjective}"`, + `"from {country} to {city}, chasing better {noun}"`, + `"we are what we repeatedly {verb}"`, + `"the {adjective} {noun} was a choice, not a chance"`, + `"every {noun} hides a smaller {noun}"`, + `"constraints are the {noun} that shape {noun}"`, + `"the cost of {noun} is usually hidden in {noun}"`, + `"we borrowed {noun} from the future to ship today"`, + `"quality is a series of {adjective} decisions"`, + + // Questions + `"what problem does this {noun} solve"`, + `"why this {noun} and not that {noun}"`, + `"how would this {noun} {verb} at scale"`, + `"what breaks when {noun} {verb}s"`, + `"who owns the {noun} after launch"`, + `"is the {noun} doing too much"`, + `"which {noun} matters right now"`, + `"does {noun} help the {noun} or distract it"`, + `"what happens if we remove this {noun}"`, + `"are we optimizing the wrong {noun}"`, + + // Humor / light + `"make {noun}, not meetings"`, + `"add {adjective} {noun}; regret later"`, + `"it worked on my {noun}"`, + `"one more {noun}, then sleep"`, + `"documents are just {noun} with better {noun}"`, + `"naming is hard, {noun} is harder"`, + `"we {verb} the {noun} and called it agile"`, + `"the backlog is just {noun} with hopes"`, + `"today’s blocker is tomorrow’s {noun}"`, + `"I’ll refactor after {noun}"`, + + // Work / craft + `"craft lives in the space between {noun} and {noun}"`, + `"discipline is the quiet {noun} behind excellence"`, + `"habits turn {noun} into momentum"`, + `"precision is {adjective} kindness"`, + `"consistency beats {adjective} bursts"`, + `"ownership begins where excuses end"`, + `"feedback is a mirror for {noun}"`, + `"slow is smooth, smooth is {adjective}"`, + `"systems scale, heroics do not"`, + `"we get the {noun} we reward"`, + + // Tech / dev flavored + `"delete the {noun}, not the clarity"`, + `"latency is a {noun} tax"`, + `"naming, caching, and {noun}—pick two"`, + `"pragmatism beats perfect {noun}"`, + `"make the happy path obvious"`, + 
`"logs are stories your {noun} tells"`, + `"APIs are just promises about {noun}"`, + `"complexity compounds like {noun}"`, + `"tests are confidence, not ceremony"`, + `"design for failure, celebrate {noun}"`, + + // Creativity / design + `"constraints create character"`, + `"edit until the {noun} appears"`, + `"taste is trained attention"`, + `"contrast makes {noun} legible"`, + `"delight lives in the last {number:2,10}%"`, + `"the grid is a kind of kindness"`, + `"silence is also part of the {noun}"`, + `"good {noun} feels inevitable"`, + `"remove until it breaks, then add one {noun}"`, + `"friction reveals intent"`, + + // Nature / metaphor + `"gardens, not garages: grow the {noun}"`, + `"rivers remember the path of {noun}"`, + `"sharp tools, soft hands"`, + `"winter disciplines, spring reveals"`, + `"build roots before branches"`, + `"polish is the weathering of {noun}"`, + `"stable soil makes brave {noun}"`, + `"light shows what {noun} hides"`, + `"grain fights every careless {verb}"`, + `"measure twice, cut once"`, + + // People / leadership + `"clear goals, kind {noun}"`, + `"trust is a backlog of kept promises"`, + `"argue like {adjective} peers, decide like {noun}"`, + `"praise in public, critique in private"`, + `"teams ship what they believe"`, + `"culture is the worst {noun} you tolerate"`, + `"leadership is {verb} the {noun} first"`, + `"alignment is the art of saying the same {noun}"`, + `"energy is contagious—so is {noun}"`, + `"hire for slope, teach the {noun}"`, + + // Attribution variants + `"make it work, then make it right" — {firstname} {lastname}`, + `"simplicity scales better than cleverness" — {jobtitle} in {city}`, + `"focus is saying no to good {noun}" — {company}`, + `"good {productcategory} is just clear {noun}" — {firstname} {lastname}`, + `"optimize for trust before speed" — {jobtitle}`, + `"the roadmap is a story about {noun}" — {company} team`, + `"we shipped when {noun} felt {adjective}" — {firstname}`, + `"defaults shape behavior" — 
{firstname} {lastname} in {city}`, + `"names are the first UX" — {jobtitle} at {company}`, + `"choose boring tech, exciting {noun}" — {firstname} {lastname}`, + }, + "question": { + // Ultra-short checks + "why this {noun}", + "how does this {noun} work", + "what changed", + "what is missing", + "who owns this {noun}", + "where does {noun} live", + "when does {noun} happen", + "is this {noun} necessary", + "does this {noun} scale", + "are we over-optimizing", + + // Clarification / intent + "what problem does this {noun} solve", + "who is the {noun} for", + "what is the goal of this {noun}", + "how will we know {noun} is successful", + "what is the smallest {noun} that works", + "what is the success metric for this {noun}", + "which {noun} matters most right now", + "what is the scope of this {noun}", + "what assumptions are behind this {noun}", + "what constraints shape this {noun}", + + // Why / rationale + "why this {noun} and not that {noun}", + "why now for this {noun}", + "why did we choose {adjective} {noun}", + "why is this {noun} the default", + "why keep this {noun} in {city}", + "why is {noun} coupled to {noun}", + "why does {company} prefer this {noun}", + "why not remove this {noun}", + "why prioritize this {noun} over {noun}", + "why is this {noun} hard", + + // How / approach + "how would this {noun} {verb} at scale", + "how can we simplify this {noun}", + "how does {noun} interact with {noun}", + "how will {noun} fail", + "how do we test this {noun}", + "how can we break this {noun} safely", + "how do we make {noun} observable", + "how do we migrate from {noun} to {noun}", + "how will we monitor this {noun} in {country}", + "how do we roll back if {noun} breaks", + + // Decision / trade-offs + "what are the trade-offs between {noun} and {noun}", + "what do we gain by choosing {noun}", + "what do we lose if we drop {noun}", + "which risks come with this {noun}", + "which {noun} is the constraint", + "what is the opportunity cost of {noun}", + "which 
{noun} should we do first", + "what is reversible about this {noun}", + "what is the cheapest way to try this {noun}", + "what would we choose if {number:1,10}x more users arrived", + + // Performance / reliability + "what happens when {noun} {verb}s under load", + "where could {noun} become a bottleneck", + "how do we guard {noun} against {verb}", + "what is the latency budget for this {noun}", + "how does {noun} behave under {adjective} load", + "what fails if {noun} is slow", + "how do we detect {noun} regressions", + "can {noun} recover after {verb}", + "what is the fallback when {noun} fails", + "how do we rate-limit this {noun}", + + // Product / UX / design + "who is the primary user of this {noun}", + "what is the happy path for this {noun}", + "where is the first friction in this {noun}", + "what is confusing about this {noun}", + "how does this {noun} reduce effort", + "what is the most common {noun} error", + "how does {color} affect this {noun}", + "which {productfeature} is essential", + "what is the minimum useful {productcategory}", + "how would a new user explain this {noun}", + + // Data / measurement + "what will we measure for this {noun}", + "what is the baseline for this {noun}", + "what is a good target for this {noun}", + "which metric should move if we {verb} the {noun}", + "how do we attribute improvements to this {noun}", + "what data do we need before {verb}ing", + "how noisy is this {noun} metric", + "what is the sample size for this {noun}", + "how do we track {noun} over time", + "what is the alert threshold for {noun}", + + // Planning / timeline + "what can we ship in {number:1,6} weeks", + "what is the critical path for this {noun}", + "what can slip without hurting {noun}", + "what depends on this {noun}", + "what is blocked by {noun}", + "when do we review this {noun}", + "what must happen before {verb}ing the {noun}", + "what is the rollout plan for this {noun}", + + // People / ownership + "who decides if {noun} is done", + "who 
maintains this {noun}", + "who is on call for {noun}", + "who reviews changes to this {noun}", + "who approves {noun} in {company}", + "who needs to know about this {noun}", + "who can remove this {noun} safely", + "who pairs on this {noun} next", + + // Alternatives / exploration + "what alternatives exist to {noun}", + "what is the simplest alternative to this {noun}", + "what if we did the opposite of {noun}", + "what if we delayed this {noun}", + "what if we automated this {noun}", + "what if we stopped doing this {noun}", + "what would this {noun} look like in {programminglanguage}", + "what happens if we move {noun} to {city}", + + // Risk / failure / safety + "what is the worst outcome of this {noun}", + "what could go wrong with {noun}", + "how do we make {noun} safe to fail", + "what is our recovery plan for {noun}", + "what if {noun} never {verb}s", + "how do we contain a {noun} incident", + "what signs show {noun} is degrading", + "what is the cost of a {noun} outage", + + // Cost / effort + "what is the ongoing cost of this {noun}", + "how much effort is {noun} to maintain", + "what can we drop to fund this {noun}", + "what is the cheapest experiment for this {noun}", + "what is the marginal gain from {noun}", + "what is the payoff time for this {noun}", + + // Communication / docs + "what should the doc for this {noun} say", + "what is the single sentence that explains this {noun}", + "what diagram would clarify this {noun}", + "what is the changelog entry for this {noun}", + "what decision did we make about this {noun}", + "what questions remain about this {noun}", + + // Domain flavored (optional but generic) + "how would {jobtitle} use this {noun}", + "how does this {noun} help {company}", + "what does {language} support look like for this {noun}", + "how does {hobby} influence this {noun}", + "what does this {noun} look like in {country}", + "where in {city} would this {noun} matter", + }, +} diff --git 
a/vendor/github.com/brianvoe/gofakeit/v7/time.go b/vendor/github.com/brianvoe/gofakeit/v7/datetime.go similarity index 61% rename from vendor/github.com/brianvoe/gofakeit/v7/time.go rename to vendor/github.com/brianvoe/gofakeit/v7/datetime.go index 55589034..4034d3a7 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/time.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/datetime.go @@ -163,13 +163,14 @@ func timeZoneOffset(f *Faker) float32 { return float32(value) } -// javaDateFormatToGolangDateFormat converts java date format into go date format -func javaDateFormatToGolangDateFormat(format string) string { +// javaDateTimeFormatToGolangFormat converts java date/time format into go date/time format +func javaDateTimeFormatToGolangFormat(format string) string { format = strings.Replace(format, "ddd", "_2", -1) format = strings.Replace(format, "dd", "02", -1) format = strings.Replace(format, "d", "2", -1) format = strings.Replace(format, "HH", "15", -1) + format = strings.Replace(format, "H", "15", -1) format = strings.Replace(format, "hh", "03", -1) format = strings.Replace(format, "h", "3", -1) @@ -213,10 +214,16 @@ func javaDateFormatToGolangDateFormat(format string) string { func addDateTimeLookup() { AddFuncLookup("date", Info{ Display: "Date", - Category: "time", + Category: "datetime", Description: "Representation of a specific day, month, and year, often used for chronological reference", Example: "2006-01-02T15:04:05Z07:00", Output: "string", + Aliases: []string{ + "date string", "calendar date", "datetime", "timestamp", "chronological reference", + }, + Keywords: []string{ + "time", "day", "month", "year", "format", "rfc3339", "iso8601", "utc", + }, Params: []Param{ { Field: "format", @@ -259,17 +266,23 @@ func addDateTimeLookup() { return f.Date().Format(time.RFC3339), nil } - return f.Date().Format(javaDateFormatToGolangDateFormat(format)), nil + return f.Date().Format(javaDateTimeFormatToGolangFormat(format)), nil } }, }) AddFuncLookup("daterange", 
Info{ - Display: "DateRange", - Category: "time", + Display: "Date Range", + Category: "datetime", Description: "Random date between two ranges", - Example: "2006-01-02T15:04:05Z07:00", + Example: "1995-06-15T14:30:00Z", Output: "string", + Aliases: []string{ + "date interval", "date span", "date window", "between dates", "bounded period", + }, + Keywords: []string{ + "daterange", "range", "between", "date", "time", "random", "bounds", "limits", "window", + }, Params: []Param{ { Field: "startdate", @@ -298,7 +311,7 @@ func addDateTimeLookup() { if err != nil { return nil, err } - format = javaDateFormatToGolangDateFormat(format) + format = javaDateTimeFormatToGolangFormat(format) startdate, err := info.GetString(m, "startdate") if err != nil { @@ -324,177 +337,362 @@ func addDateTimeLookup() { AddFuncLookup("pastdate", Info{ Display: "PastDate", - Category: "time", + Category: "datetime", Description: "Date that has occurred before the current moment in time", Example: "2007-01-24 13:00:35.820738079 +0000 UTC", Output: "time", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return pastDate(f), nil + Aliases: []string{ + "past date", "historical date", "previous date", "earlier date", "prior time", }, + Keywords: []string{ + "date", "time", "occurred", "elapsed", "gone", "expired", "finished", "completed", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pastDate(f), nil }, }) AddFuncLookup("futuredate", Info{ Display: "FutureDate", - Category: "time", + Category: "datetime", Description: "Date that has occurred after the current moment in time", Example: "2107-01-24 13:00:35.820738079 +0000 UTC", Output: "time", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return futureDate(f), nil + Aliases: []string{ + "future date", "upcoming date", "next date", "scheduled date", "later time", }, + Keywords: []string{ + "future", "date", "time", "forthcoming", "prospective", "anticipated", "scheduled", 
+ }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return futureDate(f), nil }, }) AddFuncLookup("nanosecond", Info{ Display: "Nanosecond", - Category: "time", - Description: "Unit of time equal to One billionth (10^-9) of a second", + Category: "datetime", + Description: "Unit of time equal to one billionth (10^-9) of a second", Example: "196446360", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return nanoSecond(f), nil + Aliases: []string{ + "nano", "ns value", "tiny time", "ultra precision", "fractional second", }, + Keywords: []string{ + "time", "unit", "second", "billionth", "ultra", "high", "resolution", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nanoSecond(f), nil }, }) AddFuncLookup("second", Info{ Display: "Second", - Category: "time", + Category: "datetime", Description: "Unit of time equal to 1/60th of a minute", Example: "43", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return second(f), nil + Aliases: []string{ + "second value", "sec unit", "time second", "sixtieth minute", "time slice", }, + Keywords: []string{ + "time", "unit", "minute", "sixtieth", "duration", "interval", "sixty", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return second(f), nil }, }) AddFuncLookup("minute", Info{ Display: "Minute", - Category: "time", + Category: "datetime", Description: "Unit of time equal to 60 seconds", Example: "34", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return minute(f), nil + Aliases: []string{ + "minute value", "time minute", "sixty seconds", "short period", "clock minute", }, + Keywords: []string{ + "time", "unit", "60", "seconds", "duration", "interval", "sixtieth", "hour", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minute(f), nil }, }) AddFuncLookup("hour", Info{ Display: "Hour", - Category: "time", + 
Category: "datetime", Description: "Unit of time equal to 60 minutes", Example: "8", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return hour(f), nil + Aliases: []string{ + "hour value", "time hour", "sixty minutes", "clock hour", "time period", }, + Keywords: []string{ + "time", "unit", "60", "minutes", "duration", "interval", "day", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hour(f), nil }, }) AddFuncLookup("day", Info{ Display: "Day", - Category: "time", + Category: "datetime", Description: "24-hour period equivalent to one rotation of Earth on its axis", Example: "12", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return day(f), nil + Aliases: []string{ + "calendar day", "day value", "earth rotation", "daily unit", "full day", }, + Keywords: []string{ + "time", "unit", "axis", "24-hour", "calendar", "sunrise", "sunset", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return day(f), nil }, }) AddFuncLookup("weekday", Info{ Display: "Weekday", - Category: "time", + Category: "datetime", Description: "Day of the week excluding the weekend", Example: "Friday", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return weekDay(f), nil + Aliases: []string{ + "weekday name", "business day", "work day", "monday to friday", "weekday label", }, + Keywords: []string{ + "day", "week", "workday", "business", "calendar", "monday", "tuesday", "wednesday", "thursday", "friday", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return weekDay(f), nil }, }) AddFuncLookup("month", Info{ Display: "Month", - Category: "time", + Category: "datetime", Description: "Division of the year, typically 30 or 31 days long", Example: "1", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return month(f), nil + Aliases: []string{ + "calendar month", "month 
value", "monthly unit", "date month", "time month", }, + Keywords: []string{ + "year", "time", "30", "31", "days", "calendar", "period", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return month(f), nil }, }) AddFuncLookup("monthstring", Info{ Display: "Month String", - Category: "time", - Description: "String Representation of a month name", + Category: "datetime", + Description: "String representation of a month name", Example: "September", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return monthString(f), nil + Aliases: []string{ + "month name", "calendar month name", "full month", "month label", "month string", }, + Keywords: []string{ + "month", "string", "time", "representation", "january", "september", "december", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return monthString(f), nil }, }) AddFuncLookup("year", Info{ Display: "Year", - Category: "time", + Category: "datetime", Description: "Period of 365 days, the time Earth takes to orbit the Sun", Example: "1900", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return year(f), nil + Aliases: []string{ + "calendar year", "annual period", "orbit year", "year value", "fiscal year", }, + Keywords: []string{ + "time", "365", "days", "leap", "calendar", "decade", "century", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return year(f), nil }, }) AddFuncLookup("timezone", Info{ Display: "Timezone", - Category: "time", + Category: "datetime", Description: "Region where the same standard time is used, based on longitudinal divisions of the Earth", Example: "Kaliningrad Standard Time", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return timeZone(f), nil + Aliases: []string{ + "time zone", "tz name", "standard time zone", "geographic zone", "regional time", }, + Keywords: []string{ + "time", "earth", "utc", "gmt", 
"pst", "est", "cst", "mst", "dst", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return timeZone(f), nil }, }) AddFuncLookup("timezoneabv", Info{ Display: "Timezone Abbreviation", - Category: "time", + Category: "datetime", Description: "Abbreviated 3-letter word of a timezone", Example: "KST", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return timeZoneAbv(f), nil + Aliases: []string{ + "timezone abbr", "tz short code", "abbreviated zone", "short tz name", "zone abbreviation", }, + Keywords: []string{ + "time", "3-letter", "kst", "pst", "est", "gmt", "utc", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return timeZoneAbv(f), nil }, }) AddFuncLookup("timezonefull", Info{ Display: "Timezone Full", - Category: "time", + Category: "datetime", Description: "Full name of a timezone", Example: "(UTC+03:00) Kaliningrad, Minsk", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return timeZoneFull(f), nil + Aliases: []string{ + "timezone full", "full tz name", "complete zone name", "long tz name", "detailed zone", }, + Keywords: []string{ + "timezone", "full", "time", "standard", "format", "display", "utc", "gmt", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return timeZoneFull(f), nil }, }) AddFuncLookup("timezoneoffset", Info{ Display: "Timezone Offset", - Category: "time", + Category: "datetime", Description: "The difference in hours from Coordinated Universal Time (UTC) for a specific region", - Example: "3", + Example: "-5", Output: "float32", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return timeZoneOffset(f), nil + Aliases: []string{ + "utc offset", "gmt offset", "tz shift", "time difference", "offset value", }, + Keywords: []string{ + "timezone", "offset", "utc", "gmt", "plus", "minus", "east", "west", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { 
return timeZoneOffset(f), nil }, }) AddFuncLookup("timezoneregion", Info{ Display: "Timezone Region", - Category: "time", + Category: "datetime", Description: "Geographic area sharing the same standard time", Example: "America/Alaska", Output: "string", + Aliases: []string{ + "region zone", "geo time region", "tz area", "regional timezone", "country zone", + }, + Keywords: []string{ + "timezone", "time", "america", "europe", "asia", "africa", "australia", "continent", "city", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return timeZoneRegion(f), nil }, + }) + + AddFuncLookup("time", Info{ + Display: "Time", + Category: "datetime", + Description: "Random time string in the specified format", + Example: "14:30:25", + Output: "string", + Aliases: []string{ + "time string", "clock time", "time format", "time value", "hour minute second", + }, + Keywords: []string{ + "clock", "hour", "minute", "second", "format", "24-hour", "12-hour", "am", "pm", + }, + Params: []Param{ + { + Field: "format", + Display: "Format", + Type: "string", + Default: "HH:mm:ss", + Options: []string{"HH:mm:ss", "HH:mm", "hh:mm:ss a", "hh:mm a", "H:mm", "h:mm a"}, + Description: "Time format string. 
Supports Java time format patterns or Go time format patterns", + }, + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return timeZoneRegion(f), nil + format, err := info.GetString(m, "format") + if err != nil { + return nil, err + } + + // Convert java format to golang format + golangFormat := javaDateTimeFormatToGolangFormat(format) + + // Create a time with today's date but random time + t := time.Date(2000, 1, 1, hour(f), minute(f), second(f), nanoSecond(f), time.UTC) + + return t.Format(golangFormat), nil + }, + }) + + AddFuncLookup("timerange", Info{ + Display: "Time Range", + Category: "datetime", + Description: "Random time string between start and end times", + Example: "10:15:30", + Output: "string", + Aliases: []string{ + "time interval", "time span", "time window", "between times", "bounded time", + }, + Keywords: []string{ + "timerange", "range", "between", "time", "start", "end", "bounds", "limits", "window", + }, + Params: []Param{ + { + Field: "starttime", + Display: "Start Time", + Type: "string", + Default: "00:00:00", + Description: "Start time string in the specified format", + }, + { + Field: "endtime", + Display: "End Time", + Type: "string", + Default: "23:59:59", + Description: "End time string in the specified format", + }, + { + Field: "format", + Display: "Format", + Type: "string", + Default: "HH:mm:ss", + Options: []string{"HH:mm:ss", "HH:mm", "hh:mm:ss a", "hh:mm a", "H:mm", "h:mm a"}, + Description: "Time format string. 
Supports Java time format patterns or Go time format patterns", + }, + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + format, err := info.GetString(m, "format") + if err != nil { + return nil, err + } + + startTime, err := info.GetString(m, "starttime") + if err != nil { + return nil, err + } + + endTime, err := info.GetString(m, "endtime") + if err != nil { + return nil, err + } + + // Convert java format to golang format + golangFormat := javaDateTimeFormatToGolangFormat(format) + + // Parse start and end times + start, err := time.Parse(golangFormat, startTime) + if err != nil { + // If parsing fails, use a default start time + start = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + } + + end, err := time.Parse(golangFormat, endTime) + if err != nil { + // If parsing fails, use a default end time + end = time.Date(2000, 1, 1, 23, 59, 59, 999999999, time.UTC) + } + + // Generate random time between start and end + startNano := start.UnixNano() + endNano := end.UnixNano() + + if startNano > endNano { + startNano, endNano = endNano, startNano + } + + randomNano := int64(number(f, int(startNano), int(endNano))) + randomTime := time.Unix(0, randomNano).UTC() + + return randomTime.Format(golangFormat), nil }, }) diff --git a/vendor/github.com/brianvoe/gofakeit/v7/emoji.go b/vendor/github.com/brianvoe/gofakeit/v7/emoji.go index c9e9785e..c21ac28c 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/emoji.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/emoji.go @@ -6,15 +6,32 @@ func Emoji() string { return emoji(GlobalFaker) } // Emoji will return a random fun emoji func (f *Faker) Emoji() string { return emoji(f) } -func emoji(f *Faker) string { return getRandValue(f, []string{"emoji", "emoji"}) } +func emoji(f *Faker) string { + // Explicit allow-list of emoji subgroups (sorted for deterministic behavior) + allowed := []string{ + "animal", + "clothing", + "costume", + "electronics", + "face", + "flag", + "food", + "game", + "gesture", + "hand", 
+ "job", + "landmark", + "music", + "person", + "plant", + "sport", + "tools", + "vehicle", + "weather", + } -// EmojiDescription will return a random fun emoji description -func EmojiDescription() string { return emojiDescription(GlobalFaker) } - -// EmojiDescription will return a random fun emoji description -func (f *Faker) EmojiDescription() string { return emojiDescription(f) } - -func emojiDescription(f *Faker) string { return getRandValue(f, []string{"emoji", "description"}) } + return getRandValue(f, []string{"emoji", randomString(f, allowed)}) +} // EmojiCategory will return a random fun emoji category func EmojiCategory() string { return emojiCategory(GlobalFaker) } @@ -40,6 +57,173 @@ func (f *Faker) EmojiTag() string { return emojiTag(f) } func emojiTag(f *Faker) string { return getRandValue(f, []string{"emoji", "tag"}) } +// EmojiFlag will return a random country flag emoji +func EmojiFlag() string { return emojiFlag(GlobalFaker) } + +// EmojiFlag will return a random country flag emoji +func (f *Faker) EmojiFlag() string { return emojiFlag(f) } + +func emojiFlag(f *Faker) string { return getRandValue(f, []string{"emoji", "flag"}) } + +// EmojiAnimal will return a random animal emoji +func EmojiAnimal() string { return emojiAnimal(GlobalFaker) } + +// EmojiAnimal will return a random animal emoji +func (f *Faker) EmojiAnimal() string { return emojiAnimal(f) } + +func emojiAnimal(f *Faker) string { return getRandValue(f, []string{"emoji", "animal"}) } + +// EmojiFood will return a random food emoji +func EmojiFood() string { return emojiFood(GlobalFaker) } + +// EmojiFood will return a random food emoji +func (f *Faker) EmojiFood() string { return emojiFood(f) } + +func emojiFood(f *Faker) string { return getRandValue(f, []string{"emoji", "food"}) } + +// EmojiPlant will return a random plant emoji +func EmojiPlant() string { return emojiPlant(GlobalFaker) } + +// EmojiPlant will return a random plant emoji +func (f *Faker) EmojiPlant() string { return 
emojiPlant(f) } + +func emojiPlant(f *Faker) string { return getRandValue(f, []string{"emoji", "plant"}) } + +// EmojiMusic will return a random music-related emoji +func EmojiMusic() string { return emojiMusic(GlobalFaker) } + +// EmojiMusic will return a random music-related emoji +func (f *Faker) EmojiMusic() string { return emojiMusic(f) } + +func emojiMusic(f *Faker) string { return getRandValue(f, []string{"emoji", "music"}) } + +// EmojiVehicle will return a random vehicle/transport emoji +func EmojiVehicle() string { return emojiVehicle(GlobalFaker) } + +// EmojiVehicle will return a random vehicle/transport emoji +func (f *Faker) EmojiVehicle() string { return emojiVehicle(f) } + +func emojiVehicle(f *Faker) string { return getRandValue(f, []string{"emoji", "vehicle"}) } + +// EmojiSport will return a random sports emoji +func EmojiSport() string { return emojiSport(GlobalFaker) } + +// EmojiSport will return a random sports emoji +func (f *Faker) EmojiSport() string { return emojiSport(f) } + +func emojiSport(f *Faker) string { return getRandValue(f, []string{"emoji", "sport"}) } + +// EmojiFace will return a random face emoji +func EmojiFace() string { return emojiFace(GlobalFaker) } + +// EmojiFace will return a random face emoji +func (f *Faker) EmojiFace() string { return emojiFace(f) } + +func emojiFace(f *Faker) string { return getRandValue(f, []string{"emoji", "face"}) } + +// EmojiHand will return a random hand emoji +func EmojiHand() string { return emojiHand(GlobalFaker) } + +// EmojiHand will return a random hand emoji +func (f *Faker) EmojiHand() string { return emojiHand(f) } + +func emojiHand(f *Faker) string { return getRandValue(f, []string{"emoji", "hand"}) } + +// EmojiClothing will return a random clothing or accessory emoji +func EmojiClothing() string { return emojiClothing(GlobalFaker) } + +// EmojiClothing will return a random clothing or accessory emoji +func (f *Faker) EmojiClothing() string { return emojiClothing(f) } + +func 
emojiClothing(f *Faker) string { return getRandValue(f, []string{"emoji", "clothing"}) } + +// EmojiLandmark will return a random landmark or place emoji +func EmojiLandmark() string { return emojiLandmark(GlobalFaker) } + +// EmojiLandmark will return a random landmark or place emoji +func (f *Faker) EmojiLandmark() string { return emojiLandmark(f) } + +func emojiLandmark(f *Faker) string { return getRandValue(f, []string{"emoji", "landmark"}) } + +// EmojiElectronics will return a random electronics/media device emoji +func EmojiElectronics() string { return emojiElectronics(GlobalFaker) } + +// EmojiElectronics will return a random electronics/media device emoji +func (f *Faker) EmojiElectronics() string { return emojiElectronics(f) } + +func emojiElectronics(f *Faker) string { return getRandValue(f, []string{"emoji", "electronics"}) } + +// EmojiGame will return a random game/leisure emoji +func EmojiGame() string { return emojiGame(GlobalFaker) } + +// EmojiGame will return a random game/leisure emoji +func (f *Faker) EmojiGame() string { return emojiGame(f) } + +func emojiGame(f *Faker) string { return getRandValue(f, []string{"emoji", "game"}) } + +// EmojiTools will return a random tools/weapons emoji +func EmojiTools() string { return emojiTools(GlobalFaker) } + +// EmojiTools will return a random tools/weapons emoji +func (f *Faker) EmojiTools() string { return emojiTools(f) } + +func emojiTools(f *Faker) string { return getRandValue(f, []string{"emoji", "tools"}) } + +// EmojiWeather will return a random weather/celestial emoji +func EmojiWeather() string { return emojiWeather(GlobalFaker) } + +// EmojiWeather will return a random weather/celestial emoji +func (f *Faker) EmojiWeather() string { return emojiWeather(f) } + +func emojiWeather(f *Faker) string { return getRandValue(f, []string{"emoji", "weather"}) } + +// EmojiJob will return a random job/occupation emoji +func EmojiJob() string { return emojiJob(GlobalFaker) } + +// EmojiJob will return a 
random job/occupation emoji +func (f *Faker) EmojiJob() string { return emojiJob(f) } + +func emojiJob(f *Faker) string { return getRandValue(f, []string{"emoji", "job"}) } + +// EmojiPerson will return a random person variant emoji +func EmojiPerson() string { return emojiPerson(GlobalFaker) } + +// EmojiPerson will return a random person variant emoji +func (f *Faker) EmojiPerson() string { return emojiPerson(f) } + +func emojiPerson(f *Faker) string { return getRandValue(f, []string{"emoji", "person"}) } + +// EmojiGesture will return a random gesture emoji +func EmojiGesture() string { return emojiGesture(GlobalFaker) } + +// EmojiGesture will return a random gesture emoji +func (f *Faker) EmojiGesture() string { return emojiGesture(f) } + +func emojiGesture(f *Faker) string { return getRandValue(f, []string{"emoji", "gesture"}) } + +// EmojiCostume will return a random costume/fantasy emoji +func EmojiCostume() string { return emojiCostume(GlobalFaker) } + +// EmojiCostume will return a random costume/fantasy emoji +func (f *Faker) EmojiCostume() string { return emojiCostume(f) } + +func emojiCostume(f *Faker) string { return getRandValue(f, []string{"emoji", "costume"}) } + +// EmojiSentence will return a random sentence with emojis interspersed +func EmojiSentence() string { return emojiSentence(GlobalFaker) } + +// EmojiSentence will return a random sentence with emojis interspersed +func (f *Faker) EmojiSentence() string { return emojiSentence(f) } + +func emojiSentence(f *Faker) string { + sentence, err := generate(f, getRandValue(f, []string{"emoji", "sentence"})) + if err != nil { + return "" + } + + return sentence +} + func addEmojiLookup() { AddFuncLookup("emoji", Info{ Display: "Emoji", @@ -47,28 +231,37 @@ func addEmojiLookup() { Description: "Digital symbol expressing feelings or ideas in text messages and online chats", Example: "🤣", Output: "string", + Aliases: []string{ + "emoticon symbol", + "chat icon", + "unicode pictograph", + "emotional 
glyph", + "digital expression", + }, + Keywords: []string{ + "symbol", "text", "message", "online", "chats", "ideas", "feelings", "digital", "reaction", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return emoji(f), nil }, }) - AddFuncLookup("emojidescription", Info{ - Display: "Emoji Description", - Category: "emoji", - Description: "Brief explanation of the meaning or emotion conveyed by an emoji", - Example: "face vomiting", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return emojiDescription(f), nil - }, - }) - AddFuncLookup("emojicategory", Info{ Display: "Emoji Category", Category: "emoji", Description: "Group or classification of emojis based on their common theme or use, like 'smileys' or 'animals'", Example: "Smileys & Emotion", Output: "string", + Aliases: []string{ + "emoji group", + "emoji theme", + "emoji section", + "emoji classification", + "emoji grouping", + }, + Keywords: []string{ + "emoji", "smileys", "emotion", "animals", "theme", "classification", "set", "category", "collection", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return emojiCategory(f), nil }, @@ -80,6 +273,16 @@ func addEmojiLookup() { Description: "Alternative name or keyword used to represent a specific emoji in text or code", Example: "smile", Output: "string", + Aliases: []string{ + "emoji nickname", + "emoji shorthand", + "emoji label", + "emoji alt text", + "emoji identifier", + }, + Keywords: []string{ + "emoji", "alias", "smile", "code", "specific", "represent", "alternative", "keyword", "mapping", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return emojiAlias(f), nil }, @@ -91,8 +294,279 @@ func addEmojiLookup() { Description: "Label or keyword associated with an emoji to categorize or search for it easily", Example: "happy", Output: "string", + Aliases: []string{ + "emoji keyword", + "emoji marker", + "emoji label", + "emoji hashtag", + "emoji 
reference", + }, + Keywords: []string{ + "emoji", "tag", "happy", "associated", "categorize", "search", "label", "index", "metadata", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return emojiTag(f), nil }, }) + + AddFuncLookup("emojiflag", Info{ + Display: "Emoji Flag", + Category: "emoji", + Description: "Unicode symbol representing a specific country's flag", + Example: "🇺🇸", + Output: "string", + Aliases: []string{"country flag", "flag emoji", "national flag"}, + Keywords: []string{"emoji", "flag", "country", "national", "unicode", "symbol", "banner"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiFlag(f), nil + }, + }) + + AddFuncLookup("emojianimal", Info{ + Display: "Emoji Animal", + Category: "emoji", + Description: "Unicode symbol representing an animal", + Example: "🐌", + Output: "string", + Aliases: []string{"animal emoji", "creature emoji", "wildlife emoji"}, + Keywords: []string{"emoji", "animal", "creature", "wildlife", "pet", "nature", "zoo", "mammal"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiAnimal(f), nil + }, + }) + + AddFuncLookup("emojifood", Info{ + Display: "Emoji Food", + Category: "emoji", + Description: "Unicode symbol representing food or drink", + Example: "🍾", + Output: "string", + Aliases: []string{"food emoji", "drink emoji", "meal emoji"}, + Keywords: []string{"emoji", "food", "drink", "meal", "snack", "beverage", "cuisine", "restaurant"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiFood(f), nil + }, + }) + + AddFuncLookup("emojiplant", Info{ + Display: "Emoji Plant", + Category: "emoji", + Description: "Unicode symbol representing a plant, flower, or tree", + Example: "🌻", + Output: "string", + Aliases: []string{"plant emoji", "flower emoji", "tree emoji"}, + Keywords: []string{"emoji", "plant", "flower", "tree", "nature", "botanical", "leaf", "garden"}, + Generate: func(f *Faker, m *MapParams, 
info *Info) (any, error) { + return emojiPlant(f), nil + }, + }) + + AddFuncLookup("emojimusic", Info{ + Display: "Emoji Music", + Category: "emoji", + Description: "Unicode symbol representing music or musical instruments", + Example: "🎵", + Output: "string", + Aliases: []string{"music emoji", "instrument emoji", "audio emoji"}, + Keywords: []string{"emoji", "music", "instrument", "audio", "song", "sound", "melody"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiMusic(f), nil + }, + }) + + AddFuncLookup("emojivehicle", Info{ + Display: "Emoji Vehicle", + Category: "emoji", + Description: "Unicode symbol representing vehicles or transportation", + Example: "🚗", + Output: "string", + Aliases: []string{"vehicle emoji", "transport emoji", "transportation emoji"}, + Keywords: []string{"emoji", "vehicle", "transport", "transportation", "car", "train", "plane", "boat"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiVehicle(f), nil + }, + }) + + AddFuncLookup("emojisport", Info{ + Display: "Emoji Sport", + Category: "emoji", + Description: "Unicode symbol representing sports, activities, or awards", + Example: "⚽", + Output: "string", + Aliases: []string{"sport emoji", "sports emoji", "activity emoji"}, + Keywords: []string{"emoji", "sport", "activity", "game", "award", "team", "fitness", "exercise"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiSport(f), nil + }, + }) + + AddFuncLookup("emojiface", Info{ + Display: "Emoji Face", + Category: "emoji", + Description: "Unicode symbol representing faces/smileys (including cat/creature faces)", + Example: "😀", + Output: "string", + Aliases: []string{"face emoji", "smiley emoji", "cat face emoji"}, + Keywords: []string{"emoji", "face", "smiley", "emotion", "expression", "cat", "creature"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiFace(f), nil + }, + }) + + 
AddFuncLookup("emojihand", Info{ + Display: "Emoji Hand", + Category: "emoji", + Description: "Unicode symbol representing hand gestures and hand-related symbols", + Example: "👍", + Output: "string", + Aliases: []string{"hand emoji", "gesture emoji", "hand symbol"}, + Keywords: []string{"emoji", "hand", "gesture", "thumbs", "fingers", "clap", "pray"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiHand(f), nil + }, + }) + + AddFuncLookup("emojiclothing", Info{ + Display: "Emoji Clothing", + Category: "emoji", + Description: "Unicode symbol representing clothing and accessories", + Example: "👗", + Output: "string", + Aliases: []string{"clothing emoji", "accessory emoji", "garment emoji", "wardrobe emoji"}, + Keywords: []string{"emoji", "clothing", "apparel", "accessory", "shoes", "hat", "jewelry"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiClothing(f), nil + }, + }) + + AddFuncLookup("emojilandmark", Info{ + Display: "Emoji Landmark", + Category: "emoji", + Description: "Unicode symbol representing landmarks and notable places/buildings", + Example: "🗽", + Output: "string", + Aliases: []string{"landmark emoji", "place emoji", "building emoji"}, + Keywords: []string{"emoji", "landmark", "place", "building", "monument", "site"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiLandmark(f), nil + }, + }) + + AddFuncLookup("emojielectronics", Info{ + Display: "Emoji Electronics", + Category: "emoji", + Description: "Unicode symbol representing electronic and media devices", + Example: "💻", + Output: "string", + Aliases: []string{"electronics emoji", "device emoji", "media emoji"}, + Keywords: []string{"emoji", "electronics", "device", "media", "computer", "phone", "camera"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiElectronics(f), nil + }, + }) + + AddFuncLookup("emojigame", Info{ + Display: "Emoji Game", + Category: 
"emoji", + Description: "Unicode symbol representing games and leisure", + Example: "🎮", + Output: "string", + Aliases: []string{"game emoji", "leisure emoji", "gaming emoji", "play emoji"}, + Keywords: []string{"emoji", "game", "leisure", "cards", "dice", "puzzle", "toy"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiGame(f), nil + }, + }) + + AddFuncLookup("emojitools", Info{ + Display: "Emoji Tools", + Category: "emoji", + Description: "Unicode symbol representing tools or similar equipment", + Example: "🔨", + Output: "string", + Aliases: []string{"tool emoji", "equipment emoji", "hardware emoji", "repair emoji"}, + Keywords: []string{"emoji", "tools", "equipment", "hardware", "fix", "build"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiTools(f), nil + }, + }) + + AddFuncLookup("emojiweather", Info{ + Display: "Emoji Weather", + Category: "emoji", + Description: "Unicode symbol representing weather and celestial bodies", + Example: "☀️", + Output: "string", + Aliases: []string{"weather emoji", "sky emoji", "forecast emoji", "climate emoji"}, + Keywords: []string{"emoji", "weather", "sky", "celestial", "cloud", "rain", "sun"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiWeather(f), nil + }, + }) + + AddFuncLookup("emojijob", Info{ + Display: "Emoji Job", + Category: "emoji", + Description: "Unicode symbol representing people in a role of employment", + Example: "🧑‍💻", + Output: "string", + Aliases: []string{"job emoji", "occupation emoji", "career emoji", "profession emoji"}, + Keywords: []string{"emoji", "job", "role", "profession", "worker", "person"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiJob(f), nil + }, + }) + + AddFuncLookup("emojiperson", Info{ + Display: "Emoji Person", + Category: "emoji", + Description: "Unicode symbol representing human person variants", + Example: "👩", + Output: "string", + 
Aliases: []string{"person emoji", "human emoji", "adult emoji", "child emoji"}, + Keywords: []string{"emoji", "person", "human", "man", "woman", "adult", "child"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiPerson(f), nil + }, + }) + + AddFuncLookup("emojigesture", Info{ + Display: "Emoji Gesture", + Category: "emoji", + Description: "Unicode symbol representing person gestures/poses", + Example: "🙋", + Output: "string", + Aliases: []string{"gesture emoji", "pose emoji", "action emoji"}, + Keywords: []string{"emoji", "gesture", "pose", "action", "person", "hand", "sign"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiGesture(f), nil + }, + }) + + AddFuncLookup("emojicostume", Info{ + Display: "Emoji Costume", + Category: "emoji", + Description: "Unicode symbol representing costume/fantasy people and roles", + Example: "🦸", + Output: "string", + Aliases: []string{"costume emoji", "fantasy emoji", "role emoji"}, + Keywords: []string{"emoji", "costume", "fantasy", "superhero", "prince", "princess"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiCostume(f), nil + }, + }) + + AddFuncLookup("emojisentence", Info{ + Display: "Emoji Sentence", + Category: "emoji", + Description: "Sentence with random emojis interspersed throughout", + Example: "Weekends reserve time for 🖼️ Disc 🏨 golf and day.", + Output: "string", + Aliases: []string{"sentence with emojis", "emoji text", "emoji message"}, + Keywords: []string{"emoji", "sentence", "text", "message", "interspersed", "random", "words", "expression"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return emojiSentence(f), nil + }, + }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/error.go b/vendor/github.com/brianvoe/gofakeit/v7/error.go index e3726e7e..ef007223 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/error.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/error.go @@ 
-146,96 +146,152 @@ func addErrorLookup() { Description: "Message displayed by a computer or software when a problem or mistake is encountered", Example: "syntax error", Output: "string", + Aliases: []string{ + "fault", "problem", "issue", "bug", "failure", + }, + Keywords: []string{ + "software", "computer", "crash", "exception", "warning", "alert", "diagnostic", "system", "message", "malfunction", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return err(f), nil }, }) AddFuncLookup("errorobject", Info{ - Display: "Error object word", + Display: "Error Object", Category: "error", Description: "Various categories conveying details about encountered errors", Example: "protocol", Output: "string", + Aliases: []string{ + "category", "classification", "entity", "type", "object detail", + }, + Keywords: []string{ + "protocol", "context", "identifier", "descriptor", "domain", "nature", "tag", "origin", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorObject(f), nil }, }) AddFuncLookup("errordatabase", Info{ - Display: "Database error", + Display: "Database Error", Category: "error", Description: "A problem or issue encountered while accessing or managing a database", Example: "sql error", Output: "string", + Aliases: []string{ + "db error", "query issue", "storage failure", "sql fault", "data access problem", + }, + Keywords: []string{ + "connection", "query", "timeout", "transaction", "integrity", "constraint", "lock", "schema", "management", "corruption", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorDatabase(f), nil }, }) AddFuncLookup("errorgrpc", Info{ - Display: "gRPC error", + Display: "gRPC Error", Category: "error", Description: "Communication failure in the high-performance, open-source universal RPC framework", Example: "client protocol error", Output: "string", + Aliases: []string{ + "grpc failure", "rpc error", "rpc failure", "communication fault", "transport 
issue", + }, + Keywords: []string{ + "protocol", "transport", "client", "server", "connection", "status", "unavailable", "timeout", "stream", "call", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorGRPC(f), nil }, }) AddFuncLookup("errorhttp", Info{ - Display: "HTTP error", + Display: "HTTP Error", Category: "error", - Description: "A problem with a web http request", + Description: "A problem with a web HTTP request", Example: "invalid method", Output: "string", + Aliases: []string{ + "http failure", "network error", "web problem", "request fault", "protocol issue", + }, + Keywords: []string{ + "invalid", "method", "status", "response", "request", "header", "url", "timeout", "redirect", "forbidden", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorHTTP(f), nil }, }) AddFuncLookup("errorhttpclient", Info{ - Display: "HTTP client error", + Display: "HTTP Client Error", Category: "error", Description: "Failure or issue occurring within a client software that sends requests to web servers", Example: "request timeout", Output: "string", + Aliases: []string{ + "client failure", "browser error", "request timeout", "frontend fault", "http client issue", + }, + Keywords: []string{ + "timeout", "request", "forbidden", "unauthorized", + "network", "connectivity", "invalid", "failure", "rejected", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorHTTPClient(f), nil }, }) AddFuncLookup("errorhttpserver", Info{ - Display: "HTTP server error", + Display: "HTTP Server Error", Category: "error", - Description: "Failure or issue occurring within a server software that recieves requests from clients", + Description: "Failure or issue occurring within a server software that receives requests from clients", Example: "internal server error", Output: "string", + Aliases: []string{ + "server fault", "backend error", "host issue", "service failure", "internal error", + }, + Keywords: 
[]string{ + "unavailable", "overload", "gateway", "crash", "timeout", "backend", "processing", "failure", "503", "unexpected", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorHTTPServer(f), nil }, }) AddFuncLookup("errorruntime", Info{ - Display: "Runtime error", + Display: "Runtime Error", Category: "error", - Description: "Malfunction occuring during program execution, often causing abrupt termination or unexpected behavior", + Description: "Malfunction occurring during program execution, often causing abrupt termination or unexpected behavior", Example: "address out of bounds", Output: "string", + Aliases: []string{ + "execution error", "program crash", "runtime failure", "unexpected fault", "software halt", + }, + Keywords: []string{ + "execution", "segmentation", "overflow", "invalid", "null", "panic", "crash", "termination", "exception", "bug", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorRuntime(f), nil }, }) AddFuncLookup("errorvalidation", Info{ - Display: "Validation error", + Display: "Validation Error", Category: "error", Description: "Occurs when input data fails to meet required criteria or format specifications", Example: "missing required field", Output: "string", + Aliases: []string{ + "invalid input", "format error", "data check failure", "input rejection", "criteria mismatch", + }, + Keywords: []string{ + "missing", "required", "field", "constraint", "format", "rule", "criteria", "restriction", "validation", "check", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return errorValidation(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/faker.go b/vendor/github.com/brianvoe/gofakeit/v7/faker.go index 2271eac4..7383bb40 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/faker.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/faker.go @@ -27,13 +27,11 @@ func New(seed uint64) *Faker { // If seed is 0, use a random crypto seed if seed == 
0 { faker := NewFaker(source.NewCrypto(), false) - seed = faker.Uint64() + + return NewFaker(rand.NewPCG(faker.Uint64(), faker.Uint64()), true) } - return &Faker{ - Rand: rand.NewPCG(seed, seed), - Locked: true, - } + return NewFaker(rand.NewPCG(seed, seed), true) } // NewFaker takes in a rand.Source and thread lock state and returns a new Faker struct diff --git a/vendor/github.com/brianvoe/gofakeit/v7/file.go b/vendor/github.com/brianvoe/gofakeit/v7/file.go index ffefe863..6d5e6fc0 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/file.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/file.go @@ -23,6 +23,12 @@ func addFileLookup() { Description: "Suffix appended to a filename indicating its format or type", Example: "nes", Output: "string", + Aliases: []string{ + "extension", "file suffix", "filename ending", "type indicator", "file ending", "format suffix", + }, + Keywords: []string{ + "file", "appended", "indicating", "format", "type", "filename", "suffix", "descriptor", "notation", "identifier", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return fileExtension(f), nil }, @@ -34,6 +40,12 @@ func addFileLookup() { Description: "Defines file format and nature for browsers and email clients using standardized identifiers", Example: "application/json", Output: "string", + Aliases: []string{ + "mime type", "content type", "internet media type", "media format", "standard identifier", "file format", + }, + Keywords: []string{ + "file", "defines", "nature", "clients", "identifiers", "application", "json", "browser", "email", "protocol", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return fileMimeType(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/finance.go b/vendor/github.com/brianvoe/gofakeit/v7/finance.go index 4e363326..dca8e57c 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/finance.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/finance.go @@ -110,6 +110,8 @@ func addFinanceLookup() { 
Description: "Unique identifier for securities, especially bonds, in the United States and Canada", Example: "38259P508", Output: "string", + Aliases: []string{"identifier", "bond", "security", "us", "canada", "unique"}, + Keywords: []string{"finance", "investment", "trading", "securities", "38259p508", "checksum", "validation", "market"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return cusip(f), nil }, @@ -120,6 +122,8 @@ func addFinanceLookup() { Description: "International standard code for uniquely identifying securities worldwide", Example: "CVLRQCZBXQ97", Output: "string", + Aliases: []string{"international", "securities", "identifier", "stock", "bond", "security"}, + Keywords: []string{"finance", "investment", "trading", "cvlrqczbxq97", "worldwide", "standard", "code", "global"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return isin(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/food.go b/vendor/github.com/brianvoe/gofakeit/v7/food.go index 111dbc25..78bfd675 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/food.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/food.go @@ -93,6 +93,18 @@ func addFoodLookup() { Description: "Edible plant part, typically sweet, enjoyed as a natural snack or dessert", Example: "Peach", Output: "string", + Aliases: []string{ + "fruit item", + "natural snack", + "sweet produce", + "edible plant food", + "dessert fruit", + }, + Keywords: []string{ + "edible", "plant", "peach", + "snack", "dessert", "sweet", "natural", + "produce", "fresh", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return fruit(f), nil }, @@ -104,6 +116,18 @@ func addFoodLookup() { Description: "Edible plant or part of a plant, often used in savory cooking or salads", Example: "Amaranth Leaves", Output: "string", + Aliases: []string{ + "veggie", + "plant food", + "green produce", + "savory food", + "leafy edible", + }, + Keywords: []string{ + "greens", "produce", 
"amaranth", + "leaves", "cooking", "salads", "plant", + "edible", "savory", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return vegetable(f), nil }, @@ -115,6 +139,18 @@ func addFoodLookup() { Description: "First meal of the day, typically eaten in the morning", Example: "Blueberry banana happy face pancakes", Output: "string", + Aliases: []string{ + "morning meal", + "first meal", + "day starter", + "early food", + "sunrise meal", + }, + Keywords: []string{ + "morning", "meal", "start", + "pancakes", "blueberry", "banana", "food", + "first", "early", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return breakfast(f), nil }, @@ -126,6 +162,18 @@ func addFoodLookup() { Description: "Midday meal, often lighter than dinner, eaten around noon", Example: "No bake hersheys bar pie", Output: "string", + Aliases: []string{ + "midday meal", + "noon food", + "afternoon meal", + "light meal", + "daytime meal", + }, + Keywords: []string{ + "meal", "midday", "noon", + "lighter", "food", "pie", "bar", + "afternoon", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return lunch(f), nil }, @@ -137,6 +185,18 @@ func addFoodLookup() { Description: "Evening meal, typically the day's main and most substantial meal", Example: "Wild addicting dip", Output: "string", + Aliases: []string{ + "evening meal", + "main meal", + "days supper", + "night food", + "hearty meal", + }, + Keywords: []string{ + "supper", "evening", "meal", + "main", "substantial", "night", "food", + "heavy", "course", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return dinner(f), nil }, @@ -148,6 +208,18 @@ func addFoodLookup() { Description: "Liquid consumed for hydration, pleasure, or nutritional benefits", Example: "Soda", Output: "string", + Aliases: []string{ + "beverage", + "refreshment", + "hydration", + "liquid food", + "consumable fluid", + }, + Keywords: []string{ + "soda", "liquid", + "pleasure", "nutrition", 
"fluid", "quencher", + "consumed", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return drink(f), nil }, @@ -156,9 +228,21 @@ func addFoodLookup() { AddFuncLookup("snack", Info{ Display: "Snack", Category: "food", - Description: "Random snack", - Example: "Small, quick food item eaten between meals", + Description: "Small, quick food item eaten between meals", + Example: "Trail mix", Output: "string", + Aliases: []string{ + "light bite", + "quick food", + "mini meal", + "finger food", + "nibble", + }, + Keywords: []string{ + "between", "meals", "quick", + "small", "food", "item", "random", + "bite", "treat", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return snack(f), nil }, @@ -170,8 +254,21 @@ func addFoodLookup() { Description: "Sweet treat often enjoyed after a meal", Example: "French napoleons", Output: "string", + Aliases: []string{ + "after meal sweet", + "pastry treat", + "confection", + "final course", + "delicacy", + }, + Keywords: []string{ + "sweet", "treat", "meal", + "after", "pastry", "cake", "enjoyed", + "final", "sugar", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return dessert(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/game.go b/vendor/github.com/brianvoe/gofakeit/v7/game.go index 0e137000..66af13d9 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/game.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/game.go @@ -69,6 +69,18 @@ func addGameLookup() { Description: "User-selected online username or alias used for identification in games", Example: "footinterpret63", Output: "string", + Aliases: []string{ + "player handle", + "gaming nickname", + "online tag", + "user alias", + "profile name", + }, + Keywords: []string{ + "user-selected", "username", + "alias", "identification", "online", "gaming", + "video", "games", "player", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return gamertag(f), nil }, @@ -80,6 +92,18 @@ 
func addGameLookup() { Description: "Small, cube-shaped objects used in games of chance for random outcomes", Example: "[5, 2, 3]", Output: "[]uint", + Aliases: []string{ + "rolling cubes", + "chance cubes", + "game dice", + "random rollers", + "luck blocks", + }, + Keywords: []string{ + "games", "cube-shaped", "chance", + "random", "outcomes", "roll", "sides", + "objects", "probability", + }, Params: []Param{ {Field: "numdice", Display: "Number of Dice", Type: "uint", Default: "1", Description: "Number of dice to roll"}, {Field: "sides", Display: "Number of Sides", Type: "[]uint", Default: "[6]", Description: "Number of sides on each dice"}, @@ -98,4 +122,5 @@ func addGameLookup() { return dice(f, numDice, sides), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/generate.go b/vendor/github.com/brianvoe/gofakeit/v7/generate.go index 9ca091e7..358caefe 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/generate.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/generate.go @@ -47,84 +47,77 @@ func generate(f *Faker, dataVal string) (string, error) { dataVal = replaceWithLetters(f, dataVal) // Check if string has any replaceable values - // Even if it doesnt its ok we will just return the string - if !strings.Contains(dataVal, "{") && !strings.Contains(dataVal, "}") { + if !strings.Contains(dataVal, "{") { return dataVal, nil } - // Variables to identify the index in which it exists - startCurly := -1 - startCurlyIgnore := []int{} - endCurly := -1 - endCurlyIgnore := []int{} + var result strings.Builder + result.Grow(len(dataVal) * 2) // Pre-allocate with estimate - // Loop through string characters - for i := 0; i < len(dataVal); i++ { - // Check for ignores if equal skip - shouldSkip := false - for _, igs := range startCurlyIgnore { - if i == igs { - shouldSkip = true + i := 0 + for i < len(dataVal) { + // Find next opening brace + start := strings.IndexByte(dataVal[i:], '{') + if start == -1 { + // No more replacements, append rest and break + 
result.WriteString(dataVal[i:]) + break + } + start += i + + // Append everything before the brace + result.WriteString(dataVal[i:start]) + + // Find matching closing brace (handle nested brackets) + end := -1 + depth := 0 + for j := start; j < len(dataVal); j++ { + if dataVal[j] == '{' { + depth++ + } else if dataVal[j] == '}' { + depth-- + if depth == 0 { + end = j + break + } } } - for _, ige := range endCurlyIgnore { - if i == ige { - shouldSkip = true - } - } - if shouldSkip { - continue + + if end == -1 { + // No closing brace, append rest and break + result.WriteString(dataVal[start:]) + break } - // Identify items between brackets. Ex: {firstname} - if string(dataVal[i]) == "{" { - startCurly = i - continue - } - if startCurly != -1 && string(dataVal[i]) == "}" { - endCurly = i - } - if startCurly == -1 || endCurly == -1 { - continue - } + // Extract function name and params + fParts := dataVal[start+1 : end] + fName, fParams, _ := strings.Cut(fParts, ":") - // Get the value between brackets - fParts := dataVal[startCurly+1 : endCurly] - - // Check if has params separated by : - fNameSplit := strings.SplitN(fParts, ":", 2) - fName := "" - fParams := "" - if len(fNameSplit) >= 1 { - fName = fNameSplit[0] - } - if len(fNameSplit) >= 2 { - fParams = fNameSplit[1] - } - - // Check to see if its a replaceable lookup function + // Check if it's a replaceable lookup function if info := GetFuncLookup(fName); info != nil { - // Get parameters, make sure params and the split both have values - mapParams := NewMapParams() + // Get parameters + var mapParams *MapParams paramsLen := len(info.Params) - // If just one param and its a string simply just pass it - if paramsLen == 1 && info.Params[0].Type == "string" { - mapParams.Add(info.Params[0].Field, fParams) - } else if paramsLen > 0 && fParams != "" { - var err error - splitVals, err := funcLookupSplit(fParams) - if err != nil { - return "", err + if paramsLen > 0 && fParams != "" { + mapParams = NewMapParams() + // 
If just one param and its a string simply just pass it + if paramsLen == 1 && info.Params[0].Type == "string" { + mapParams.Add(info.Params[0].Field, fParams) + } else { + splitVals, err := funcLookupSplit(fParams) + if err != nil { + return "", err + } + mapParams, err = addSplitValsToMapParams(splitVals, info, mapParams) + if err != nil { + return "", err + } } - mapParams, err = addSplitValsToMapParams(splitVals, info, mapParams) - if err != nil { - return "", err + if mapParams.Size() == 0 { + mapParams = nil } } - if mapParams.Size() == 0 { - mapParams = nil - } // Call function fValue, err := info.Generate(f, mapParams, info) @@ -132,30 +125,17 @@ func generate(f *Faker, dataVal string) (string, error) { return "", err } - // Successfully found, run replace with new value - dataVal = strings.Replace(dataVal, "{"+fParts+"}", fmt.Sprintf("%v", fValue), 1) - - // Reset the curly index back to -1 and reset ignores - startCurly = -1 - startCurlyIgnore = []int{} - endCurly = -1 - endCurlyIgnore = []int{} - i = -1 // Reset back to the start of the string - continue + // Write the generated value + result.WriteString(fmt.Sprintf("%v", fValue)) + i = end + 1 + } else { + // Not a valid function, keep the braces + result.WriteString(dataVal[start : end+1]) + i = end + 1 } - - // Couldnt find anything - mark curly brackets to skip and rerun - startCurlyIgnore = append(startCurlyIgnore, startCurly) - endCurlyIgnore = append(endCurlyIgnore, endCurly) - - // Reset the curly index back to -1 - startCurly = -1 - endCurly = -1 - i = -1 // Reset back to the start of the string - continue } - return dataVal, nil + return result.String(), nil } // FixedWidthOptions defines values needed for csv generation @@ -489,6 +469,19 @@ func addGenerateLookup() { Description: "Random string generated from string value based upon available data sets", Example: "{firstname} {lastname} {email} - Markus Moen markusmoen@pagac.net", Output: "string", + Aliases: []string{ + "template expander", + 
"placeholder interpolator", + "variable substitution", + "token formatter", + "pattern builder", + "macro resolver", + }, + Keywords: []string{ + "upon", "datasets", "random", + "string", "value", "available", "data", + "sets", "based", + }, Params: []Param{ {Field: "str", Display: "String", Type: "string", Description: "String value to generate from"}, }, @@ -517,6 +510,12 @@ Alayna Wuckert santinostanton@carroll.biz g7sLrS0gEwLO 46 Lura Lockman zacherykuhic@feil.name S8gV7Z64KlHG 12`, Output: "[]byte", ContentType: "text/plain", + Aliases: []string{ + "fixed rows", "columnar data", "padded text", "aligned output", "structured fields", + }, + Keywords: []string{ + "tabular", "data", "format", "alignment", "columns", "rows", "layout", "monospace", "table", "presentation", + }, Params: []Param{ {Field: "rowcount", Display: "Row Count", Type: "int", Default: "10", Description: "Number of rows"}, {Field: "fields", Display: "Fields", Type: "[]Field", Description: "Fields name, function and params"}, @@ -562,6 +561,19 @@ Lura Lockman zacherykuhic@feil.name S8gV7Z64KlHG 12`, Description: "Pattern-matching tool used in text processing to search and manipulate strings", Example: "[abcdef]{5} - affec", Output: "string", + Aliases: []string{ + "regular expression", + "string matcher", + "text parser", + "pattern engine", + "token analyzer", + "rule evaluator", + }, + Keywords: []string{ + "strings", "re2", "syntax", + "pattern-matching", "tool", "search", + "validation", "compile", "replace", + }, Params: []Param{ {Field: "str", Display: "String", Type: "string", Description: "Regex RE2 syntax string"}, }, @@ -593,6 +605,19 @@ Lura Lockman zacherykuhic@feil.name S8gV7Z64KlHG 12`, }`, Output: "map[string]any", ContentType: "application/json", + Aliases: []string{ + "associative array", + "lookup table", + "symbol table", + "keyed collection", + "map structure", + "object store", + }, + Keywords: []string{ + "stores", "key", "value", + "dictionary", "hash", "collection", + 
"pairs", "keys", "values", "structure", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return mapFunc(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/hacker.go b/vendor/github.com/brianvoe/gofakeit/v7/hacker.go index 22a78eb8..19143325 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/hacker.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/hacker.go @@ -75,6 +75,13 @@ func addHackerLookup() { Description: "Informal jargon and slang used in the hacking and cybersecurity community", Example: "If we calculate the program, we can get to the AI pixel through the redundant XSS matrix!", Output: "string", + Aliases: []string{ + "hacker jargon", "cyber phrase", "security slang", "tech quip", "infosec phrase", + }, + Keywords: []string{ + "phrase", "jargon", "slang", "informal", "community", + "calculate", "program", "ai", "pixel", "redundant", "xss", "matrix", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hackerPhrase(f), nil }, @@ -86,6 +93,13 @@ func addHackerLookup() { Description: "Abbreviations and acronyms commonly used in the hacking and cybersecurity community", Example: "ADP", Output: "string", + Aliases: []string{ + "infosec acronym", "tech abbreviation", "security acronym", "cyber acronym", "hacker shorthand", + }, + Keywords: []string{ + "abbreviation", "acronym", "short", "code", "initialism", + "common", "used", "security", "community", "terminology", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hackerAbbreviation(f), nil }, @@ -97,6 +111,13 @@ func addHackerLookup() { Description: "Adjectives describing terms often associated with hackers and cybersecurity experts", Example: "wireless", Output: "string", + Aliases: []string{ + "hacker descriptor", "cyber adjective", "infosec modifier", "security adjective", "tech describing word", + }, + Keywords: []string{ + "adjective", "descriptive", "term", "modifier", "attribute", + "wireless", "connected", 
"digital", "virtual", "networked", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hackerAdjective(f), nil }, @@ -108,6 +129,13 @@ func addHackerLookup() { Description: "Noun representing an element, tool, or concept within the realm of hacking and cybersecurity", Example: "driver", Output: "string", + Aliases: []string{ + "hacking tool", "cyber noun", "security concept", "tech object", "infosec element", + }, + Keywords: []string{ + "noun", "element", "tool", "concept", "object", + "driver", "exploit", "payload", "virus", "device", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hackerNoun(f), nil }, @@ -119,6 +147,13 @@ func addHackerLookup() { Description: "Verbs associated with actions and activities in the field of hacking and cybersecurity", Example: "synthesize", Output: "string", + Aliases: []string{ + "hacking verb", "cyber action", "infosec verb", "tech activity", "security verb", + }, + Keywords: []string{ + "verb", "action", "activity", "task", "operation", + "synthesize", "exploit", "inject", "bypass", "scan", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hackerVerb(f), nil }, @@ -130,6 +165,13 @@ func addHackerLookup() { Description: "Verb describing actions and activities related to hacking, often involving computer systems and security", Example: "connecting", Output: "string", + Aliases: []string{ + "hacking action", "present participle", "cyber verb", "infosec activity", "progressive verb", + }, + Keywords: []string{ + "verb", "ing", "connecting", "probing", "listening", + "systems", "process", "computer", "security", "operation", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hackeringVerb(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/hipster.go b/vendor/github.com/brianvoe/gofakeit/v7/hipster.go index 8cf7cba9..0c6de6d8 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/hipster.go +++ 
b/vendor/github.com/brianvoe/gofakeit/v7/hipster.go @@ -1,8 +1,6 @@ package gofakeit -import ( - "errors" -) +import "strings" // HipsterWord will return a single hipster word func HipsterWord() string { return hipsterWord(GlobalFaker) } @@ -12,36 +10,43 @@ func (f *Faker) HipsterWord() string { return hipsterWord(f) } func hipsterWord(f *Faker) string { return getRandValue(f, []string{"hipster", "word"}) } -// HipsterSentence will generate a random sentence -func HipsterSentence(wordCount int) string { return hipsterSentence(GlobalFaker, wordCount) } +// HipsterSentence will generate a random hipster sentence +func HipsterSentence() string { return hipsterSentence(GlobalFaker) } -// HipsterSentence will generate a random sentence -func (f *Faker) HipsterSentence(wordCount int) string { return hipsterSentence(f, wordCount) } +// HipsterSentence will generate a random hipster sentence +func (f *Faker) HipsterSentence() string { return hipsterSentence(f) } -func hipsterSentence(f *Faker, wordCount int) string { - return sentenceGen(f, wordCount, hipsterWord) +func hipsterSentence(f *Faker) string { + sentence, err := generate(f, getRandValue(f, []string{"hipster", "sentence"})) + if err != nil { + return "" + } + + // Capitalize the first letter + sentence = strings.ToUpper(sentence[:1]) + sentence[1:] + + return sentence } -// HipsterParagraph will generate a random paragraphGenerator -// Set Paragraph Count -// Set Sentence Count -// Set Word Count -// Set Paragraph Separator -func HipsterParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return hipsterParagraph(GlobalFaker, paragraphCount, sentenceCount, wordCount, separator) +// HipsterParagraph will generate a random hipster paragraph +func HipsterParagraph() string { + return hipsterParagraph(GlobalFaker) } -// HipsterParagraph will generate a random paragraphGenerator -// Set Paragraph Count -// Set Sentence Count -// Set Word Count -// Set Paragraph Separator -func (f 
*Faker) HipsterParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return hipsterParagraph(f, paragraphCount, sentenceCount, wordCount, separator) +// HipsterParagraph will generate a random hipster paragraph +func (f *Faker) HipsterParagraph() string { + return hipsterParagraph(f) } -func hipsterParagraph(f *Faker, paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return paragraphGen(f, paragrapOptions{paragraphCount, sentenceCount, wordCount, separator}, hipsterSentence) +func hipsterParagraph(f *Faker) string { + // generate 2-5 sentences + sentenceCount := f.Number(2, 5) + sentences := make([]string, sentenceCount) + for i := 0; i < sentenceCount; i++ { + sentences[i] = hipsterSentence(f) + } + + return strings.Join(sentences, " ") } func addHipsterLookup() { @@ -51,6 +56,8 @@ func addHipsterLookup() { Description: "Trendy and unconventional vocabulary used by hipsters to express unique cultural preferences", Example: "microdosing", Output: "string", + Aliases: []string{"word", "trendy", "unconventional", "vocabulary", "culture", "modern"}, + Keywords: []string{"hipster", "preferences", "microdosing", "artisanal", "craft", "organic", "sustainable", "authentic"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hipsterWord(f), nil }, @@ -60,21 +67,12 @@ func addHipsterLookup() { Display: "Hipster Sentence", Category: "hipster", Description: "Sentence showcasing the use of trendy and unconventional vocabulary associated with hipster culture", - Example: "Microdosing roof chia echo pickled.", + Example: "Soul loops with you probably haven't heard of them undertones.", Output: "string", - Params: []Param{ - {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, - }, + Aliases: []string{"sentence", "trendy", "unconventional", "vocabulary", "culture", "modern"}, + Keywords: []string{"hipster", "showcasing", 
"microdosing", "roof", "chia", "echo", "pickled", "artisanal"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - wordCount, err := info.GetInt(m, "wordcount") - if err != nil { - return nil, err - } - if wordCount <= 0 || wordCount > 50 { - return nil, errors.New("invalid word count, must be greater than 0, less than 50") - } - - return hipsterSentence(f, wordCount), nil + return hipsterSentence(f), nil }, }) @@ -82,49 +80,12 @@ func addHipsterLookup() { Display: "Hipster Paragraph", Category: "hipster", Description: "Paragraph showcasing the use of trendy and unconventional vocabulary associated with hipster culture", - Example: `Microdosing roof chia echo pickled meditation cold-pressed raw denim fingerstache normcore sriracha pork belly. Wolf try-hard pop-up blog tilde hashtag health butcher waistcoat paleo portland vinegar. Microdosing sartorial blue bottle slow-carb freegan five dollar toast you probably haven't heard of them asymmetrical chia farm-to-table narwhal banjo. Gluten-free blog authentic literally synth vinyl meh ethical health fixie banh mi Yuccie. Try-hard drinking squid seitan cray VHS echo chillwave hammock kombucha food truck sustainable. - -Pug bushwick hella tote bag cliche direct trade waistcoat yr waistcoat knausgaard pour-over master. Pitchfork jean shorts franzen flexitarian distillery hella meggings austin knausgaard crucifix wolf heirloom. Crucifix food truck you probably haven't heard of them trust fund fixie gentrify pitchfork stumptown mlkshk umami chambray blue bottle. 3 wolf moon swag +1 biodiesel knausgaard semiotics taxidermy meh artisan hoodie +1 blue bottle. Fashion axe forage mixtape Thundercats pork belly whatever 90's beard selfies chambray cred mlkshk. - -Shabby chic typewriter VHS readymade lo-fi bitters PBR&B gentrify lomo raw denim freegan put a bird on it. Raw denim cliche dreamcatcher pug fixie park trust fund migas fingerstache sriracha +1 mustache. 
Tilde shoreditch kickstarter franzen dreamcatcher green juice mustache neutra polaroid stumptown organic schlitz. Flexitarian ramps chicharrones kogi lo-fi mustache tilde forage street church-key williamsburg taxidermy. Chia mustache plaid mumblecore squid slow-carb disrupt Thundercats goth shoreditch master direct trade.`, - Output: "string", - Params: []Param{ - {Field: "paragraphcount", Display: "Paragraph Count", Type: "int", Default: "2", Description: "Number of paragraphs"}, - {Field: "sentencecount", Display: "Sentence Count", Type: "int", Default: "2", Description: "Number of sentences in a paragraph"}, - {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, - {Field: "paragraphseparator", Display: "Paragraph Separator", Type: "string", Default: "
", Description: "String value to add between paragraphs"}, - }, + Example: "Single-origin austin, double why. Tag it Yuccie, keep it any. Ironically pug, sincerely several. Roof > helvetica, discuss. From France to Jersey, chasing ennui.", + Output: "string", + Aliases: []string{"paragraph", "trendy", "unconventional", "vocabulary", "culture", "modern"}, + Keywords: []string{"hipster", "showcasing", "meditation", "cold-pressed", "raw", "denim", "fingerstache", "normcore", "sriracha"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - paragraphCount, err := info.GetInt(m, "paragraphcount") - if err != nil { - return nil, err - } - if paragraphCount <= 0 || paragraphCount > 20 { - return nil, errors.New("invalid paragraph count, must be greater than 0, less than 20") - } - - sentenceCount, err := info.GetInt(m, "sentencecount") - if err != nil { - return nil, err - } - if sentenceCount <= 0 || sentenceCount > 20 { - return nil, errors.New("invalid sentence count, must be greater than 0, less than 20") - } - - wordCount, err := info.GetInt(m, "wordcount") - if err != nil { - return nil, err - } - if wordCount <= 0 || wordCount > 50 { - return nil, errors.New("invalid word count, must be greater than 0, less than 50") - } - - paragraphSeparator, err := info.GetString(m, "paragraphseparator") - if err != nil { - return nil, err - } - - return hipsterParagraph(f, paragraphCount, sentenceCount, wordCount, paragraphSeparator), nil + return hipsterParagraph(f), nil }, }) } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/html.go b/vendor/github.com/brianvoe/gofakeit/v7/html.go index 27385ecd..82f5f24f 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/html.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/html.go @@ -99,6 +99,12 @@ func addHtmlLookup() { Description: "Attribute used to define the name of an input element in web forms", Example: "first_name", Output: "string", + Aliases: []string{ + "form field", "field name", "html input", "input 
identifier", "web attribute", + }, + Keywords: []string{ + "define", "attribute", "element", "parameter", "submission", "mapping", "key", "entry", "binding", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return inputName(f), nil }, @@ -114,6 +120,13 @@ func addHtmlLookup() { `, Output: "string", ContentType: "image/svg+xml", + Aliases: []string{ + "vector graphic", "xml image", "scalable format", "web graphic", "svg file", + }, + Keywords: []string{ + "scalable", "vector", "graphics", "image", "drawing", "markup", "shape", "color", "path", "render", + }, + Params: []Param{ {Field: "width", Display: "Width", Type: "int", Default: "500", Description: "Width in px"}, {Field: "height", Display: "Height", Type: "int", Default: "500", Description: "Height in px"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/id.go b/vendor/github.com/brianvoe/gofakeit/v7/id.go new file mode 100644 index 00000000..db04c0f5 --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/id.go @@ -0,0 +1,127 @@ +package gofakeit + +const ( + idLength = 20 + idBitsPerChar = 5 + idAlphabetMask = (1 << idBitsPerChar) - 1 + // readable 32 chars, (no 0, o, 1, i, l) + // 0 and o are removed to avoid confusion with each other + // 1, i, l are removed to avoid confusion with each other + // extra g was added to fit 32 chars + idAlphabetStr = "23456789abcdefgghjkmnpqrstuvwxyz" + hexDigits = "0123456789abcdef" +) + +var ( + idAlphabet = []byte(idAlphabetStr) +) + +// ID will return a random unique identifier +func ID() string { return id(GlobalFaker) } + +// ID will return a random unique identifier +func (f *Faker) ID() string { return id(f) } + +func id(f *Faker) string { + out := make([]byte, idLength) + + var cache uint64 + var bits uint + + for i := 0; i < idLength; { + if bits < idBitsPerChar { + cache = f.Uint64() + bits = 64 + } + + index := cache & idAlphabetMask + cache >>= idBitsPerChar + bits -= idBitsPerChar + + // optimization: remove this check to avoid 
bounds check + // if index >= uint64(idAlphabetLen) { + // continue + // } + + out[i] = idAlphabet[index] + i++ + } + + return string(out) +} + +// UUID (version 4) will generate a random unique identifier based upon random numbers +func UUID() string { return uuid(GlobalFaker) } + +// UUID (version 4) will generate a random unique identifier based upon random numbers +func (f *Faker) UUID() string { return uuid(f) } + +func uuid(f *Faker) string { + const version = byte(4) + + var uuid [16]byte + var r uint64 + + r = f.Uint64() + for i := 0; i < 8; i++ { + uuid[i] = byte(r) + r >>= 8 + } + + r = f.Uint64() + for i := 8; i < 16; i++ { + uuid[i] = byte(r) + r >>= 8 + } + + uuid[6] = (uuid[6] & 0x0f) | (version << 4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 + + var buf [36]byte + encodeHexLower(buf[0:8], uuid[0:4]) + buf[8] = dash + encodeHexLower(buf[9:13], uuid[4:6]) + buf[13] = dash + encodeHexLower(buf[14:18], uuid[6:8]) + buf[18] = dash + encodeHexLower(buf[19:23], uuid[8:10]) + buf[23] = dash + encodeHexLower(buf[24:], uuid[10:]) + + return string(buf[:]) +} + +func encodeHexLower(dst, src []byte) { + for i, b := range src { + dst[i*2] = hexDigits[b>>4] + dst[i*2+1] = hexDigits[b&0x0f] + } +} + +func addIDLookup() { + AddFuncLookup("id", Info{ + Display: "ID", + Category: "id", + Description: "Generates a short, URL-safe base32 identifier using a custom alphabet that avoids lookalike characters", + Example: "pfsfktb87rcmj6bqha2fz9", + Output: "string", + Aliases: []string{"unique id", "random id", "base32 id", "url-safe id", "slug id", "short id"}, + Keywords: []string{"random", "base32", "slug", "token", "url", "identifier", "nonsequential"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return id(f), nil + }, + }) + + AddFuncLookup("uuid", Info{ + Display: "UUID", + Category: "id", + Description: "Generates a RFC 4122 compliant version 4 UUID using the faker random source", + Example: "b4ddf623-4ea6-48e5-9292-541f028d1fdb", + Output: 
"string", + Aliases: []string{"identifier", "guid", "uuid v4", "128-bit", "uuid generator"}, + Keywords: []string{"unique", "v4", "hex", "computer", "system", "random"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return uuid(f), nil + }, + }) +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/image.go b/vendor/github.com/brianvoe/gofakeit/v7/image.go index 9a04bcd6..d20f227d 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/image.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/image.go @@ -63,6 +63,8 @@ func addImageLookup() { Example: "file.jpeg - bytes", Output: "[]byte", ContentType: "image/jpeg", + Aliases: []string{"jpeg", "jpg", "format", "compression", "compatibility", "photo"}, + Keywords: []string{"image", "efficient", "file", "bytes", "known", "rgba", "pixel", "width", "height"}, Params: []Param{ {Field: "width", Display: "Width", Type: "int", Default: "500", Description: "Image width in px"}, {Field: "height", Display: "Height", Type: "int", Default: "500", Description: "Image height in px"}, @@ -95,6 +97,8 @@ func addImageLookup() { Example: "file.png - bytes", Output: "[]byte", ContentType: "image/png", + Aliases: []string{"png", "format", "lossless", "compression", "transparency", "graphic"}, + Keywords: []string{"image", "support", "file", "bytes", "known", "rgba", "pixel", "width", "height"}, Params: []Param{ {Field: "width", Display: "Width", Type: "int", Default: "500", Description: "Image width in px"}, {Field: "height", Display: "Height", Type: "int", Default: "500", Description: "Image height in px"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/internet.go b/vendor/github.com/brianvoe/gofakeit/v7/internet.go index 68cf8580..e9fdd633 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/internet.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/internet.go @@ -53,6 +53,25 @@ func url(f *Faker) string { return url } +// UrlSlug will generate a random url slug with the specified number of words +func 
UrlSlug(words int) string { return urlSlug(GlobalFaker, words) } + +// UrlSlug will generate a random url slug with the specified number of words +func (f *Faker) UrlSlug(words int) string { return urlSlug(f, words) } + +func urlSlug(f *Faker, words int) string { + if words <= 0 { + words = 3 + } + + slug := make([]string, words) + for i := 0; i < words; i++ { + slug[i] = strings.ToLower(word(f)) + } + + return strings.Join(slug, "-") +} + // HTTPMethod will generate a random http method func HTTPMethod() string { return httpMethod(GlobalFaker) } @@ -225,6 +244,18 @@ func operaUserAgent(f *Faker) string { return "Opera/" + strconv.Itoa(randIntRange(f, 8, 10)) + "." + strconv.Itoa(randIntRange(f, 10, 99)) + " " + platform } +// APIUserAgent will generate a random API user agent string +func APIUserAgent() string { return apiUserAgent(GlobalFaker) } + +// APIUserAgent will generate a random API user agent string +func (f *Faker) APIUserAgent() string { return apiUserAgent(f) } + +func apiUserAgent(f *Faker) string { + ua := getRandValue(f, []string{"internet", "api"}) + result, _ := generate(f, ua) + return result +} + // linuxPlatformToken will generate a random linux platform func linuxPlatformToken(f *Faker) string { return "X11; Linux " + getRandValue(f, []string{"computer", "linux_processor"}) @@ -268,17 +299,45 @@ func addInternetLookup() { Description: "Web address that specifies the location of a resource on the internet", Example: "http://www.principalproductize.biz/target", Output: "string", + Aliases: []string{"url string", "web address", "internet link", "website url", "resource locator"}, + Keywords: []string{"web", "address", "http", "https", "www", "protocol", "scheme", "path", "domain", "location", "resource"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return url(f), nil }, }) + AddFuncLookup("urlslug", Info{ + Display: "URL Slug", + Category: "internet", + Description: "Simplified and URL-friendly version of a string, 
typically used in web addresses", + Example: "bathe-regularly-quiver", + Output: "string", + Aliases: []string{"slug", "url path", "permalink", "friendly url"}, + Keywords: []string{"url", "path", "hyphen", "dash", "seo", "friendly", "web", "address", "kebab", "separator"}, + Params: []Param{ + {Field: "words", Display: "Words", Type: "int", Default: "3", Description: "Number of words in the slug"}, + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + words, err := info.GetInt(m, "words") + if err != nil { + return nil, err + } + if words <= 0 { + words = 3 + } + + return urlSlug(f, words), nil + }, + }) + AddFuncLookup("domainname", Info{ Display: "Domain Name", Category: "internet", Description: "Human-readable web address used to identify websites on the internet", Example: "centraltarget.biz", Output: "string", + Aliases: []string{"domain name", "website name", "internet domain", "dns name", "site domain"}, + Keywords: []string{"domain", "name", "web", "address", "dns", "hostname", "resolve", "centraltarget", "biz", "website"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return domainName(f), nil }, @@ -290,6 +349,8 @@ func addInternetLookup() { Description: "The part of a domain name that comes after the last dot, indicating its type or purpose", Example: "org", Output: "string", + Aliases: []string{"domain suffix", "domain extension", "top level domain", "domain ending"}, + Keywords: []string{"domain", "suffix", "tld", "top-level", "extension", "org", "com", "net", "gov", "edu", "mil", "int"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return domainSuffix(f), nil }, @@ -301,6 +362,8 @@ func addInternetLookup() { Description: "Numerical label assigned to devices on a network for identification and communication", Example: "222.83.191.222", Output: "string", + Aliases: []string{"ip address", "network address", "internet address", "device ip", "ipv4 label"}, + Keywords: []string{"ipv4", "ip", 
"network", "internet", "protocol", "communication", "dotted", "decimal"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return ipv4Address(f), nil }, @@ -312,6 +375,8 @@ func addInternetLookup() { Description: "Numerical label assigned to devices on a network, providing a larger address space than IPv4 for internet communication", Example: "2001:cafe:8898:ee17:bc35:9064:5866:d019", Output: "string", + Aliases: []string{"ip address", "network address", "internet address", "hex ip", "ipv6 label"}, + Keywords: []string{"ipv6", "ip", "network", "protocol", "hexadecimal", "identification"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return ipv6Address(f), nil }, @@ -323,6 +388,8 @@ func addInternetLookup() { Description: "Verb used in HTTP requests to specify the desired action to be performed on a resource", Example: "HEAD", Output: "string", + Aliases: []string{"http verb", "http action", "http request", "http command", "method name"}, + Keywords: []string{"http", "method", "verb", "get", "post", "put", "delete", "patch", "options", "head", "request", "action"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return httpMethod(f), nil }, @@ -334,6 +401,8 @@ func addInternetLookup() { Description: "Classification used in logging to indicate the severity or priority of a log entry", Example: "error", Output: "string", + Aliases: []string{"log severity", "logging level", "log classification", "priority level", "event level"}, + Keywords: []string{"log", "level", "severity", "priority", "classification", "error", "warn", "info", "debug", "trace", "fatal", "critical"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return logLevel(f, ""), nil }, @@ -345,6 +414,8 @@ func addInternetLookup() { Description: "String sent by a web browser to identify itself when requesting web content", Example: "Mozilla/5.0 (Windows NT 5.0) AppleWebKit/5362 (KHTML, like Gecko) Chrome/37.0.834.0 Mobile Safari/5362", 
Output: "string", + Aliases: []string{"ua string", "browser ua", "http user agent", "client identifier", "browser identifier"}, + Keywords: []string{"useragent", "browser", "http", "request", "mozilla", "applewebkit", "chrome", "firefox", "safari", "opera", "mobile"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return userAgent(f), nil }, @@ -356,6 +427,8 @@ func addInternetLookup() { Description: "The specific identification string sent by the Google Chrome web browser when making requests on the internet", Example: "Mozilla/5.0 (X11; Linux i686) AppleWebKit/5312 (KHTML, like Gecko) Chrome/39.0.836.0 Mobile Safari/5312", Output: "string", + Aliases: []string{"chrome ua", "chrome browser ua", "google chrome ua", "chrome identifier", "chrome user agent"}, + Keywords: []string{"chrome", "google", "browser", "ua", "useragent", "applewebkit", "khtml", "gecko", "safari", "version"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return chromeUserAgent(f), nil }, @@ -367,6 +440,8 @@ func addInternetLookup() { Description: "The specific identification string sent by the Firefox web browser when making requests on the internet", Example: "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_8_3 rv:7.0) Gecko/1900-07-01 Firefox/37.0", Output: "string", + Aliases: []string{"firefox ua", "firefox browser ua", "mozilla firefox ua", "gecko ua", "firefox identifier"}, + Keywords: []string{"firefox", "mozilla", "browser", "ua", "useragent", "gecko", "macintosh", "ppc", "version"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return firefoxUserAgent(f), nil }, @@ -378,6 +453,8 @@ func addInternetLookup() { Description: "The specific identification string sent by the Opera web browser when making requests on the internet", Example: "Opera/8.39 (Macintosh; U; PPC Mac OS X 10_8_7; en-US) Presto/2.9.335 Version/10.00", Output: "string", + Aliases: []string{"opera ua", "opera browser ua", "opera identifier", "opera client", "opera 
user agent"}, + Keywords: []string{"opera", "presto", "ua", "browser", "useragent", "macintosh", "ppc", "os", "version"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return operaUserAgent(f), nil }, @@ -389,17 +466,34 @@ func addInternetLookup() { Description: "The specific identification string sent by the Safari web browser when making requests on the internet", Example: "Mozilla/5.0 (iPad; CPU OS 8_3_2 like Mac OS X; en-US) AppleWebKit/531.15.6 (KHTML, like Gecko) Version/4.0.5 Mobile/8B120 Safari/6531.15.6", Output: "string", + Aliases: []string{"safari ua", "apple safari ua", "safari browser ua", "safari identifier", "safari user agent"}, + Keywords: []string{"safari", "apple", "ipad", "os", "applewebkit", "khtml", "gecko", "browser", "ua", "mobile"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return safariUserAgent(f), nil }, }) + AddFuncLookup("apiuseragent", Info{ + Display: "API User Agent", + Category: "internet", + Description: "String sent by API clients, tools, or libraries to identify themselves when making HTTP requests", + Example: "curl/8.2.5", + Output: "string", + Aliases: []string{"api ua", "api client ua", "http client ua", "api identifier", "client user agent"}, + Keywords: []string{"api", "useragent", "client", "http", "request", "curl", "python", "go", "java", "node", "postman", "tool", "library"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return apiUserAgent(f), nil + }, + }) + AddFuncLookup("httpstatuscode", Info{ Display: "HTTP Status Code", Category: "internet", - Description: "Random http status code", + Description: "Random HTTP status code", Example: "200", Output: "int", + Aliases: []string{"http status", "response code", "http response", "server status", "status identifier"}, + Keywords: []string{"http", "status", "code", "server", "response", "200", "404", "500", "301", "302", "401", "403"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { 
return httpStatusCode(f), nil }, @@ -411,6 +505,8 @@ func addInternetLookup() { Description: "Three-digit number returned by a web server to indicate the outcome of an HTTP request", Example: "404", Output: "int", + Aliases: []string{"http status simple", "simple response code", "http response simple", "status code", "server code"}, + Keywords: []string{"http", "status", "code", "server", "response", "200", "404", "500", "301", "302", "401", "403"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return httpStatusCodeSimple(f), nil }, @@ -422,6 +518,8 @@ func addInternetLookup() { Description: "Number indicating the version of the HTTP protocol used for communication between a client and a server", Example: "HTTP/1.1", Output: "string", + Aliases: []string{"http version", "protocol version", "http protocol", "http identifier", "http version string"}, + Keywords: []string{"http", "version", "protocol", "communication", "client", "server"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return httpVersion(f), nil }, @@ -433,8 +531,11 @@ func addInternetLookup() { Description: "Unique identifier assigned to network interfaces, often used in Ethernet networks", Example: "cb:ce:06:94:22:e9", Output: "string", + Aliases: []string{"mac address", "hardware address", "ethernet address", "network identifier", "link-layer address"}, + Keywords: []string{"mac", "address", "hardware", "ethernet", "network", "identifier", "oui", "vendor", "colon", "hexadecimal"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return macAddress(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/json.go b/vendor/github.com/brianvoe/gofakeit/v7/json.go index 858e5233..fd21f9d3 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/json.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/json.go @@ -79,7 +79,7 @@ func jsonFunc(f *Faker, jo *JSONOptions) ([]byte, error) { return nil, errors.New("invalid type, must be array or 
object") } - if jo.Fields == nil || len(jo.Fields) <= 0 { + if len(jo.Fields) <= 0 { return nil, errors.New("must pass fields in order to build json object(s)") } @@ -201,6 +201,8 @@ func addFileJSONLookup() { ]`, Output: "[]byte", ContentType: "application/json", + Aliases: []string{"data", "interchange", "structured", "format", "serialization", "api"}, + Keywords: []string{"object", "array", "fields", "indent", "rowcount", "type", "serialize", "deserialize", "marshal", "unmarshal"}, Params: []Param{ {Field: "type", Display: "Type", Type: "string", Default: "object", Options: []string{"object", "array"}, Description: "Type of JSON, object or array"}, {Field: "rowcount", Display: "Row Count", Type: "int", Default: "100", Description: "Number of rows in JSON array"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/languages.go b/vendor/github.com/brianvoe/gofakeit/v7/languages.go index 5ac0db80..ace4f84a 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/languages.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/languages.go @@ -41,6 +41,8 @@ func addLanguagesLookup() { Description: "System of communication using symbols, words, and grammar to convey meaning between individuals", Example: "Kazakh", Output: "string", + Aliases: []string{"spoken tongue", "dialect name", "native language", "speech form", "linguistic system"}, + Keywords: []string{"communication", "symbols", "words", "grammar", "meaning", "system", "convey", "individuals"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return language(f), nil }, @@ -52,6 +54,8 @@ func addLanguagesLookup() { Description: "Shortened form of a language's name", Example: "kk", Output: "string", + Aliases: []string{"language code", "iso code", "locale code", "short form", "abbreviated tag"}, + Keywords: []string{"abbreviation", "identifier", "shortened", "representation", "two-letter", "three-letter", "standard", "locale"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return 
languageAbbreviation(f), nil }, @@ -63,6 +67,8 @@ func addLanguagesLookup() { Description: "Set of guidelines and standards for identifying and representing languages in computing and internet protocols", Example: "en-US", Output: "string", + Aliases: []string{"bcp47 tag", "language tag", "locale identifier", "regional code", "protocol language code"}, + Keywords: []string{"guidelines", "standards", "rfc", "internet", "protocols", "representation", "locale", "region", "country"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return languageBCP(f), nil }, @@ -74,6 +80,8 @@ func addLanguagesLookup() { Description: "Formal system of instructions used to create software and perform computational tasks", Example: "Go", Output: "string", + Aliases: []string{"coding language", "scripting language", "software language", "development language", "computer language"}, + Keywords: []string{"programming", "instructions", "formal", "system", "tasks", "development", "compilation", "execution"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return programmingLanguage(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/lookup.go b/vendor/github.com/brianvoe/gofakeit/v7/lookup.go index 51dda64c..5468bff0 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/lookup.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/lookup.go @@ -11,7 +11,7 @@ import ( // FuncLookups is the primary map array with mapping to all available data var FuncLookups map[string]Info -var lockFuncLookups sync.Mutex +var lockFuncLookups sync.RWMutex // MapParams is the values to pass into a lookup generate type MapParams map[string]MapParamsValue @@ -20,15 +20,17 @@ type MapParamsValue []string // Info structures fields to better break down what each one generates type Info struct { - Display string `json:"display"` - Category string `json:"category"` - Description string `json:"description"` - Example string `json:"example"` - Output string `json:"output"` - 
ContentType string `json:"content_type"` - Params []Param `json:"params"` - Any any `json:"any"` - Generate func(f *Faker, m *MapParams, info *Info) (any, error) `json:"-"` + Display string `json:"display"` // display name + Category string `json:"category"` // category + Description string `json:"description"` // description + Example string `json:"example"` // example + Output string `json:"output"` // output type + Aliases []string `json:"aliases"` // alt names users might type + Keywords []string `json:"keywords"` // free words and domain terms + ContentType string `json:"content_type"` // content type + Params []Param `json:"params"` // params + Any any `json:"any"` // any + Generate func(f *Faker, m *MapParams, info *Info) (any, error) `json:"-"` // generate function } // Param is a breakdown of param requirements and type definition @@ -54,6 +56,7 @@ func init() { initLookup() } // init will add all the functions to MapLookups func initLookup() { addAddressLookup() + addAirlineLookup() addAnimalLookup() addAppLookup() addAuthLookup() @@ -78,10 +81,10 @@ func initLookup() { addHackerLookup() addHipsterLookup() addHtmlLookup() + addIDLookup() addImageLookup() addInternetLookup() addLanguagesLookup() - addLoremLookup() addMinecraftLookup() addMiscLookup() addMovieLookup() @@ -90,6 +93,7 @@ func initLookup() { addPersonLookup() addProductLookup() addSchoolLookup() + addSongLookup() addStringLookup() addTemplateLookup() addWeightedLookup() @@ -97,15 +101,12 @@ func initLookup() { addWordAdverbLookup() addWordConnectiveLookup() addWordGeneralLookup() - addWordGrammerLookup() addWordNounLookup() - addWordPhraseLookup() addWordPrepositionLookup() addWordPronounLookup() - addWordSentenceLookup() addWordVerbLookup() - addWordCommentLookup() addWordMiscLookup() + addTextLookup() } // internalFuncLookups is the internal map array with mapping to all available data @@ -192,6 +193,9 @@ func (m *MapParamsValue) UnmarshalJSON(data []byte) error { } func 
GetRandomSimpleFunc(f *Faker) (string, Info) { + lockFuncLookups.RLock() + defer lockFuncLookups.RUnlock() + // Loop through all the functions and add them to a slice var keys []string for k, info := range FuncLookups { @@ -210,18 +214,19 @@ func GetRandomSimpleFunc(f *Faker) (string, Info) { // AddFuncLookup takes a field and adds it to map func AddFuncLookup(functionName string, info Info) { - if FuncLookups == nil { - FuncLookups = make(map[string]Info) - } - // Check content type if info.ContentType == "" { info.ContentType = "text/plain" } lockFuncLookups.Lock() + defer lockFuncLookups.Unlock() + + if FuncLookups == nil { + FuncLookups = make(map[string]Info) + } + FuncLookups[functionName] = info - lockFuncLookups.Unlock() } // GetFuncLookup will lookup @@ -235,7 +240,10 @@ func GetFuncLookup(functionName string) *Info { return &info } + lockFuncLookups.RLock() info, ok = FuncLookups[functionName] + lockFuncLookups.RUnlock() + if ok { return &info } @@ -245,14 +253,15 @@ func GetFuncLookup(functionName string) *Info { // RemoveFuncLookup will remove a function from lookup func RemoveFuncLookup(functionName string) { + lockFuncLookups.Lock() + defer lockFuncLookups.Unlock() + _, ok := FuncLookups[functionName] if !ok { return } - lockFuncLookups.Lock() delete(FuncLookups, functionName) - lockFuncLookups.Unlock() } // GetAny will retrieve Any field from Info diff --git a/vendor/github.com/brianvoe/gofakeit/v7/lorem.go b/vendor/github.com/brianvoe/gofakeit/v7/lorem.go deleted file mode 100644 index b4e48f58..00000000 --- a/vendor/github.com/brianvoe/gofakeit/v7/lorem.go +++ /dev/null @@ -1,126 +0,0 @@ -package gofakeit - -import ( - "errors" -) - -// LoremIpsumWord will generate a random word -func LoremIpsumWord() string { return loremIpsumWord(GlobalFaker) } - -// LoremIpsumWord will generate a random word -func (f *Faker) LoremIpsumWord() string { return loremIpsumWord(f) } - -func loremIpsumWord(f *Faker) string { return getRandValue(f, []string{"lorem", 
"word"}) } - -// LoremIpsumSentence will generate a random sentence -func LoremIpsumSentence(wordCount int) string { - return loremIpsumSentence(GlobalFaker, wordCount) -} - -// LoremIpsumSentence will generate a random sentence -func (f *Faker) LoremIpsumSentence(wordCount int) string { - return loremIpsumSentence(f, wordCount) -} - -func loremIpsumSentence(f *Faker, wordCount int) string { - return sentenceGen(f, wordCount, loremIpsumWord) -} - -// LoremIpsumParagraph will generate a random paragraphGenerator -func LoremIpsumParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return loremIpsumParagraph(GlobalFaker, paragraphCount, sentenceCount, wordCount, separator) -} - -// LoremIpsumParagraph will generate a random paragraphGenerator -func (f *Faker) LoremIpsumParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return loremIpsumParagraph(f, paragraphCount, sentenceCount, wordCount, separator) -} - -func loremIpsumParagraph(f *Faker, paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return paragraphGen(f, paragrapOptions{paragraphCount, sentenceCount, wordCount, separator}, loremIpsumSentence) -} - -func addLoremLookup() { - AddFuncLookup("loremipsumword", Info{ - Display: "Lorem Ipsum Word", - Category: "word", - Description: "Word of the Lorem Ipsum placeholder text used in design and publishing", - Example: "quia", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return loremIpsumWord(f), nil - }, - }) - - AddFuncLookup("loremipsumsentence", Info{ - Display: "Lorem Ipsum Sentence", - Category: "word", - Description: "Sentence of the Lorem Ipsum placeholder text used in design and publishing", - Example: "Quia quae repellat consequatur quidem.", - Output: "string", - Params: []Param{ - {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, - }, 
- Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - wordCount, err := info.GetInt(m, "wordcount") - if err != nil { - return nil, err - } - if wordCount <= 0 || wordCount > 50 { - return nil, errors.New("invalid word count, must be greater than 0, less than 50") - } - - return loremIpsumSentence(f, wordCount), nil - }, - }) - - AddFuncLookup("loremipsumparagraph", Info{ - Display: "Lorem Ipsum Paragraph", - Category: "word", - Description: "Paragraph of the Lorem Ipsum placeholder text used in design and publishing", - Example: `Quia quae repellat consequatur quidem nisi quo qui voluptatum accusantium quisquam amet. Quas et ut non dolorem ipsam aut enim assumenda mollitia harum ut. Dicta similique veniam nulla voluptas at excepturi non ad maxime at non. Eaque hic repellat praesentium voluptatem qui consequuntur dolor iusto autem velit aut. Fugit tempore exercitationem harum consequatur voluptatum modi minima aut eaque et et. - -Aut ea voluptatem dignissimos expedita odit tempore quod aut beatae ipsam iste. Minus voluptatibus dolorem maiores eius sed nihil vel enim odio voluptatem accusamus. Natus quibusdam temporibus tenetur cumque sint necessitatibus dolorem ex ducimus iusto ex. Voluptatem neque dicta explicabo officiis et ducimus sit ut ut praesentium pariatur. Illum molestias nisi at dolore ut voluptatem accusantium et fugiat et ut. - -Explicabo incidunt reprehenderit non quia dignissimos recusandae vitae soluta quia et quia. Aut veniam voluptas consequatur placeat sapiente non eveniet voluptatibus magni velit eum. Nobis vel repellendus sed est qui autem laudantium quidem quam ullam consequatur. Aut iusto ut commodi similique quae voluptatem atque qui fugiat eum aut. 
Quis distinctio consequatur voluptatem vel aliquid aut laborum facere officiis iure tempora.`, - Output: "string", - Params: []Param{ - {Field: "paragraphcount", Display: "Paragraph Count", Type: "int", Default: "2", Description: "Number of paragraphs"}, - {Field: "sentencecount", Display: "Sentence Count", Type: "int", Default: "2", Description: "Number of sentences in a paragraph"}, - {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, - {Field: "paragraphseparator", Display: "Paragraph Separator", Type: "string", Default: "
", Description: "String value to add between paragraphs"}, - }, - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - paragraphCount, err := info.GetInt(m, "paragraphcount") - if err != nil { - return nil, err - } - if paragraphCount <= 0 || paragraphCount > 20 { - return nil, errors.New("invalid paragraph count, must be greater than 0, less than 20") - } - - sentenceCount, err := info.GetInt(m, "sentencecount") - if err != nil { - return nil, err - } - if sentenceCount <= 0 || sentenceCount > 20 { - return nil, errors.New("invalid sentence count, must be greater than 0, less than 20") - } - - wordCount, err := info.GetInt(m, "wordcount") - if err != nil { - return nil, err - } - if wordCount <= 0 || wordCount > 50 { - return nil, errors.New("invalid word count, must be greater than 0, less than 50") - } - - paragraphSeparator, err := info.GetString(m, "paragraphseparator") - if err != nil { - return nil, err - } - - return loremIpsumParagraph(f, paragraphCount, sentenceCount, wordCount, paragraphSeparator), nil - }, - }) -} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/merch.png b/vendor/github.com/brianvoe/gofakeit/v7/merch.png new file mode 100644 index 0000000000000000000000000000000000000000..a7e7276872351aefd66a3b63747474911f124554 GIT binary patch literal 48523 zcmV+DKo`G>P)`005o{1^@s6F)(wo00001b5ch_0olnc ze*gdg1ZP1_K>z@;j|==^1poj5AY({UO#lFTCIA3{ga82g0001h=l}q9FaQARU;qF* zm;eA5aGbhPJOBUy24YJ`L;(K){{a7>y{D4^000SaNLh0L04^f{04^f|c%?sf00007 zbV*G`2k8eM0Rk-kRd@gZ03ZNKL_t(|+U&h~lpR-f@BcaHRMj1OZq1T4Se9j3o((nz zJOd#(BtXKHmq3P>;TOmwAtb*%SU?EBtR!nCgskKdczH~Tfdoh%i5+Yb8w_|h$nreN zlC7~d^xS=isygTQM-8{?4&ApWsU_XBR!c42d+XM4&iU^B-Fttxc;JUWFwA2JA!dIM z0>s>3JGnzNxi*&7FviULv#)s7HM3sL!Gi}4opHvFb?erNm8<&X(V-FD?Dg@{G1JaE zZ}t0Z%S*Ia=)obxocER4-fzVG_NygYykHi8ZO`<%`q%Qs!mJnn*zPAy{k^KcXRZDn z|G%g`>4y7VHxE*;i}Vbc@mYz++0^X)vp(+`=H+7J;7Ofs(VR;RYZ+ndN%+d;Vpw!W zXPlb#zueboXLLqqEbf>S$SvuJa-U8Y{l@3phd zkTUZ@`T1H0^L_4}=XEB6yjZpiaj6Z64z1rAXT5j1@(5w3#RPrlk6JP-ImY* 
z?2MD-$`*5y?TRb+lChijA;G-lW95P+zEK9bgI-cS} z9*CKBk66%t)8g5Pc2U0bXira&pHC9o%)mUGcP)}RYwi0E%eK_7Gr>CJH1r-j<7~F= z=IPvv&c?TVLB8GOw|RQ&vw{AcX4qWcf2u)#=}y#ilIuI;BpRsETsEDF*BPC0dV#z+ z$5MbaTW>aH%`yUw#`6=c-*XDqUt3{} zd7hSN>-$gFFy6wnyXW=kvq&!7Y6-)leejxXVl5TmEhUh4K)N%|l(BMEpO~o&-Hda6 zSviKHz4t7mrE3=L%G+T^PgA*LRHC&ghJe7vCA3F)xsB4SJ%bAjZk{*%!y_ zUer>cwxxDEE`4V#iy;BOa~nDUe;T>+ovm{zJo#3DI4eLd@&j2oAlK13Vu42xr|LR7 zqcav|s1C??0KS9s9Z$Y(kZR`5M)Q3kxVA7*6bt_T_!x5n_Qm+#&ghIKH|DFI+t3-O znHAO9SWlKGA4B_$s&*&;kUgGa2yBE_uJazL2Pm*W!uSP)*rvEy?7p4)-+2aVR); zlZhqLj#U${c1BOWg%>OuOS@9LWPRz73I%D1os1D+TKb2)6ns|vK>0P(v16tdgqSg0`W~Vh%Vzij? zdxL#@w)ZqSj%WdRrtTc)xmc(l@|}dAGQ2dWQ4v4kZ@x`Z}W>@YP!5`#zrMF*!NO zv(G-urcIkzy?QmaZO7jHe2pxvHRW=dM<0C@$8lJ{emy-sJvfdNY<3+Zt~1VPv`;6K z8LdO?Y69{zdeo-D1TaQpkTuUb+{aDQv@mJoz(mIpb%1BmUz-*mYXnPebjz(;B)4%P zTDaT6_r}EoCTSSe&baoP!RTi9OElR#g5w&aa>uqcUl3!|{- z$9b66#1AvJpj;pqv3SSojP~~&gE1xscHj4Tbk{C!z4a^X+qV~O48>xRzxvc)F*rDg zlL^3hrg_=S%TF2)pZn}*`NlWDNq2WQQcBjYS;L1v{9)FuTaT}OETw{tP>2-pcSdKN zp4N4^-#1wo#zYWiT7q9~e>5}hl_EqyZFIOkW$3AgpA|I#Gl^p*AuR@ESv$Dv`Y5G> zbFA~SBOH@LrZCMIojBiC5NYc6AyGp;8Jwloe*Ct;01)x_MiGwsYJtY@2-u7C`=a}# z69&=T!N*#x39=Yon^=V7EM?5R1MVju+=B(|QYw|W|Ni^A{r210zkfdy6B8&Qky7G& z9us5Z{N```23xmo<+pz8w>amV4cK<*d5h)`FFJli&HKL3U3cBZKmYSTbL7YowC|%m zA885pzr3Fh{ErWi%jMX-c{6W$%UihU;){?$unfdwxEI_`xRUf=`2^8q$) z+{h&tU&3|QU&qFCHexw;@V@Z#k;gtXG{ld7^doltco)w+`wU}aWAUa^EEW+$;Q1aH zL%CAM*FKN#ew5w2cQY_Jz_#KS1}9>JKob17CQA4-%uSWm zn%{~NDXS{wMYou6OAV-Vjlp&RetCky)YKHackkw|yY6E5?%hR7dy zzOUUl0m=6@M*Aq0K`B9{QsLOKql^rX@W{gtGdM8FD_-#muD||zww${K+p+_Otj3tQ5;#u#SGUvG#_ zFB0%KLT+PPUbrk{;1&Xm_5q2I4pIamFztI`4Yxoj85?|p`WSx;pci4FK}Zdqwq6DDaafHXmvz?aee35*S+fn;KB2(kg72Ip-=fO|q$kwW5Y36{hN z6|%@0VRY@L*TAV>AdHK!H#gDG)}1G3bz4Y1(eLMPi?og6K9= z7VEpKH5Wvf`=W{`cw`JV=~+KFIzF8Ql2lJJrq24D#r{8;wf0S(-5EW!){Kvj^S}cS z@QrVLgZ=yWV_8=0rAJh_2~&2o(P*P_9GhyT8hG(qqm)GJTCC0(gKvENs^;+F!yG?UmZh6x! 
zTzTac6bi-o+|Ok9(qIs#`74h<@i?FR+~?4~Zdjg6QF^=FK|UI`ZPU}!!;T$0mc{wS z`>&`+@T6cTsOz z2$Yox)+7?Ed=PQuzYs_7MH&y&e=(IU{|#YvHQCQaJRbxGTTkI0_)qN7htNX8&OgAn zy5sjrDN=F%=2dP!h%DCFy;S#n1)&`5bKi~M|LPVVb`2>}|77zP*yPbW+1Co_s<;Cjwj{qU5K54nYw7uKi#$ z^>xTt)Z!nl&NYsIAdpI7Sqh~TmI}+E!q`Zp5M(g1@v_|ZgENtIFvd`=RyltBIFCH? z2w(sD*O{1@2*Fz6d0tF;HJxY1U|SCOI*5r2fz}40P19r_zI@+`uG8NzfYO`AFNsmXc}+`hjskJl~#v`e~kf_Bs0d`+3*9-pQ3$UP(_+ zFSczj8Kt@mP@YyPMcNO_9P2ZUmkiiM9eE^3U%Thgr^avjHHTRK8hNB3h&^5Qn=z1lm>4^<1{>%eVT#zk0JrjG5#obxuYx@Ngpf#r=;e(v+QlTTxmh1&UnRBJ}l z6u0WE93)wNobjjsH};W#!?#zUx;H}qRaoVNSi|=ssp8se@dvKP2s|nyyA9*l|dL6Z8AvrMJ#s+GyNmviCeM#LsVD%BEGejTHeFlhqX{}PkGpNlg9G{ zdM>Ir9v>fPbaa%-$w{WBrWhR^Wol|F{uvz|qf#kTE?1~j%E4z{57+bXT$id_#q+#? z0@6CP?ix1^+p<+Qg+xO^tG~G{9z-sVCozjz1|NKF5PaYauBjV-mII(fUm9g_1&f zH&eU*gs#aSqA>V!9izDm(ZZ%seUU=t8K!gR;$_xi5-k;)I)$QEf~WAP?)e6p!~Yj$ zNhWibVAk9~ws#F~roSOcFd6=cu0bfsOh1X#lbG%u7-Tw0k|5V7BKD05OW;I8C%}M9 z4&$uEWY$p4@1T4A4V0@V@P_WC^zereQed`!i0q2jV8SlX5>4s(Z(<$zSA_8KQHYh- zkh$XHc&Z!c#Q$OPzVA~m zmnoOaxUS3dd-lYWAeYH7G&IEbzyE!H{No=pIXM|;4MtRYGHIc4!J76aw@6yC z;Kqj=PH%57KmYST&$ZWGi&6^5ae^4C6jUo!#>U2YdH;Sq&*hR!E}@VwFgiLyrBY#f zdYTo3E6C;Zc%F-G+bCr%smFaf8WhdWS!>PM*ci9odMo$ee?O&CDW<#YE%{`IYT!Yb zI2IpJ>|s`Wm|-7#(uw-g=#UxPV#UA!mX!Ea7imLpZ!dOq43e==lfoV$YFV*TDzx^4 zvKAqbfM*QC7<6cB3mwpYk%4K9p*%g!7ykbLaNED%#w|bjlU#rO^%M$)aNd}z*`&V)pkpRG+2fNCK)^ z$9r|Xv*UP&slMFw=h+qU!3+NV$`vSrI=y1To1=9y=B z!|UI`_6xRSSvJYI|LF?OOR?c;t(l&l=Anlk;%i^~8V3&^OnL1Q{Z~(2Hr_uGC$nko zV+n~L()+$2c;rIJxc)VG6u}3i2#mZ)y-vGs@c&V=imCHp5(aFvp-CywIyjC{f%h*( zn(vgV#zZ8;O0Mg2_`pHF_{A^so$q{yH^2GKTzBnt0b`V+C^xMAg*AW@9(fV-49-(~b zJJ|jNlevrGf)7y2oloY(4^hGc4241#CZEP5hbU~pnRpWag6Zc95BR0`KX6Wa99B zsPa*y6nIAB=hsub^h5Y7uSJ%g<;Ze<`4f8kdYPV{X1ZLW zR4()Q6HkO9`hM-e^s#D;=bgB2;OR*raU46InNmo?YLS4J50O_%85peEAVWO}A`0JE0x=CA9eVFlN~BUvB&JPlw4`fr z4V6lTy?gia*MI%j96WG^X8B5`z-_ZMl3$~3YwQO&MZi>G~RP$ z>_mp&VunZN^V;G;KJj(MhU~E?>GBTZ%K~!n62=RckadTt?D|u*_7KKH7#HJDqb~ad zIpI=y@-yU)-G&m1Qf56a4qe{!$gzV=KRtn5eKqCDA@U5P^aKcnFEV(tfQ?U9AIE+0 
z<8)0tg0Xv<%5FlM3YoFT$@*gfPiDDD?J|AhS&D%4HHFH{Og#T3di@iWy02!5ia< zjaC_aX9ZSaEk0I&J(1IG46e2@_GWr6{{{RXe-Gn7g z)XYRILzr%7*x2<*CMaOXvdHK2jE#+9DJzKSN|Be zsjpS46(%MomXuIE9Sw&lbo0m~kMQ}=f1V>pjsS$ok19yUs=C-VaU46LdllQIu|#d? zaZ4faMQsC(DE*m;ZP#`A(wDwOPfrh)vhX~Y;gJzs*A0&m&Y_ek9I*l}anegvx0+Zh znjk^1W*`UI`K>luL6}TxH2?HZ|HOk2KFF{9+OM%=$BtMw_e?VsTY9SkEM+bJ^)?Yd zs?XkDs(W0|MpY`6rXKu)J#KUIVml&9>(-rna?cdrBubQ-W)QpQ9E_0&zl8Kgv8*hr zI*en+&_*INP(E-snd9FJ=<}7=Qr!7VfWdv}&k&=#$&K%#G_jxTh8xMseN>a-sNz~!zYWMkWjFqbLu5-&fiQT-p2OCYxD;@@ z)*$GEV6MfQEV7Rn)9|qcjus7#vcW_%0eR$Wr4`Z z76Ny3f4Ej7z1&QW(4^>eQOBn%slp9hqHLQ=rHnC}iScnpMn@SQ9>y5RWHYrA1v4Lu zQ=|Yp0`&TS*K5*CDPu`PA;KgpqZt_)#j=8AJRxLQ8YNOO#?`AAjU zm+tOvDwQfyE~E|7{H^EHVN)L&8R7Gv|2+5Ha}S>9Appy^a9tP2cF2SYb5*w*zGl~+ zV+gAzQSG30R971s12LmStT-nC5ys7vY?SYN?Ay0D-lLLP!H9&2rqN*xzE*%E5XeO6 zNQOp7l;mT=1rf_|ChLY1MsSoffD}QUt`3f~Z{I#X@rgg;O>g{3-udo#Qz#S~f!-{5 z8kkc(A6f+7$Yk?%tw$HdDn6y`Rr*wd`RIjt3M7TQOlYV!#3~a}hb5%q+nF@U8vS%N zRAcx-YcD8yQ$DzpD8GUb8M?0c1BA5Tz}N8)-$zyTlT#9J>^TZ%oJnf~T|0h(;oMEw zW}Ix_MVPUt5XwcED%I{+pF}XEhIZPdYfi69P=d4E-*D!5z#Z^O@S#E4{qVq%%Jna5)#%h$vV zajYmNnSQ-C6(ae*o~|y+r81u9Q7)IMRw}sFYG~oMc~&ht$A~7DO?f`$!0TDT^G-FaosA8ZKH$=f!D+iS)>PTjK=7IT9=8b5tTtk24oN`1kVe))78)G z0CYrrY!j+U2`Q;D_6F_yq2VEGnfjrsl`$runoa%Wtz{)g5h)Rtjs@TiBGl^EA$@0j z&F#0}&aPd%__IIzGrGIFkTM+x(6j98vxK%Qr)oY35eNr0e^G(V)MPuQz!#@^iJ@q` zM1e%RexQ~n0HfnKm!`|XeAT9fjWf60g2goe(tBFOp0-kl(<+ZIEi8YEsh94dch#FA zyOC1gn~?5tRP~>O$!M>knCSvt0$cLvLJ0xx7)S-U;EiKTMZS87iSd^a#>0~ZWcNAP{uEU9Bb0+@3P>@D=a(3L z@;~Um<{FeL;`+fnH8P7>dm)aq0fR3`+_q?Fs9s*`UihSNA1YIOZf6Qn(Q%Y@Wts6$-$R&63LDM9_u5K9t>Mi8(0 zaET@cGSRXy-0)4!1B*8O=pC|&mz4znB=wZ^)@l@OT2DBVF^nj>vppQ|c^=O{|2%*G z=YP)7&``Q#TgbQ=A@Dp8&+<@GVOtiB@)xx^6jOqMN_3prKC`Biu?UOeZd zltGeS6b1;(=K^C+BtV~<_gyznlItZIdly0=ltNHzc2hHugPafTYjmV#9nGC^-RFYSQ$LIB=0g?CZMwIXqlsu-$vD5gRuM1IAp!UO#JAtDfK)o zNGk9E03ZNKL_t)9ntGnT$w%>Ij>`JCqR9qv_sS?r3VeG2ikm~jR^ev`!3d1^0Xy^+lvV4Z>g-)YMRbe-M`*@>flvfBb38GoQltPXLe~y_=~$JCQrL z&tQ7Z{yeP!z)aIRMKNQBYUKUPIZV=}M?np!MWDNrwa+yMBWwq#Yk3LPkfm5U3Qe3A 
z#zVPW=I*=i=F4CHG7}RMjYgrUft~NV7}p0|qGTvBseQEX<07!540x@BdmT5q2o&$; za;Jq5*mh9j7#%CAYaN|a4d81=g^;z4F6zV;*c4$MtquS?U=c$3%Y?L~2y1JT+4fPX zUX4{UVYne+jE0AY`NKc_Lw@O(euqzF&T7W8v{Gv!<4m^t_{~xI`JUc((~k} z9w#^TEC`LK`cNw_MPBwX%Ceun%5Exd6^#RSAHM3xm@2N!BR9VpGkhN!pRVEWB1Y~) z+I{#Kv;dJ`i7T?WBQKGaHolQ~{jWr>yM@aAAI8#SjPCjy27AsUyX8hEk9?nec@O!K z`^k>oi%}VTtq^;v$d#AE^j<(?L>}3_8DFi$2%Ajr+K^83$WJ_oJ$^sZ{2@YF2x&0J zMy&sN^y)WZ>|Ttufr=aiqY=hsdhM@bulO0n^Z~qwe-|d6z~25paL)e?szNoz=otY% z@m|Z+i3c?6k6w!dq`maHB(I!~(yYl+k;L6gl>Y^F^-Bx#%aoN&&~{^EV|?|iU**m_ z@2qLfH}U990^EGW$GX+7Z$XV-n8UEPh&cK-|Z+IQ<_^)A~xEE<>kzMEETiH-|U!Y|UliPu7K*f@bmBhVuF;*9T_l2Sm{PbAb;`<8=$sSg+b3PZ}xUCCj#2gu_&# zcXrUYQ{Yuy{^gtBWNLDf_x{p*$z-!FH5hb?Qayz$%GAmY<8(%&XUwiQ$-mD!svauU z%Ay@>F$|8>-j%@kcy_gD1QtAVkFpfV6<6a9UJk|s6N*hq8zVAkVaMTC@Tqid!`uG< zBJjhtr$9Ie>|j-B&6K?valr?eLSRG&V+2}HBYhuFW-#mCPIc{dDD7i}O<5?!j;ol$ z!N?582eIZBN^5Tnam>cj8vlY1;Tero*&wG(3uIv@-i6!nb?9mciADheUu2o;z8)^R z2G3N{LLr2W0$lqXl z#WhZx7#!B+)R|wV(Fnsdc=S!Q>(i9HOvKpJf*p}(X3}1XXue{biQk-RFFk1()ztSI zUr#FN)yLmm*NavD^7(v%;!l@anG_2^+(yuZ`3jBTo=!Mijx5K}&=7z3cYntN4?Gam zzPBj<5-AafV3Y7Yucl8>L&doK$rytliY7(57-o@nvt;X2Z5jbSsVvp17j`W4V}gw+ z^<0zTqeQQH9nt&EjEPioXvncwtK4zN9h?|G!Ta9-{QXpB%8ktd@M zphxZ}FAh>>6_x%s<749^K=q+jXsCo0F~(R}jcNDkhr>NLF!DzG)NEE&-phZ3but6@6 zG(oPV@Qz^#AK#rOXH}W9cHo#3WKR4C)xMv`$YM~YYxm<}1tM;cU>X~9%me$Ok;+d} z_Exf-Lo`ibYW6?{HS8Cuc7u{u2^!1MBb}Y94`^cWEtLTjI~v2ph8+ebE= zMGA@Qxp|-1| zb}TmVqWdQ6!a62Vl4bxU1g;L6mMEpb5Lag?AyJMMC$B|a;%cC}aOT;VBomd;eGuvR zdtA3~TF~|Nld0a*4ioK?A)k7-pjoa;Qeo0{T^@SyA%6S0-{y}$`NynVyS7fWXZep# z;$7>}(f~S@VFPn2Pa|E1ngk~04>3MDNq*H1#tuBeVBtYbygzVbaf2f z_eQGL2ITk;plbzWw_~emwEH}cJ3+N{f=qr8e|#6J>tZU}BU60>U#-AZtH~;jTP;&Q zdN0}D)s(#gGP8$*w+XNBbx6x4GyPqJ8K6FH%w$qx;`ja|48E;Gj*~ zu5KQaZ>1qEW@*`2`@KzF`O}O`i@mc%{k+xzt(41Ux#*&c7#tjAXy^!g_wMDn=bmHe z$PiQ0Q!$+#>8g8vt+9>}0!sy5(R{60vt|wJ*RSJMuXfux^K-YD-^Ugc@ z$AA1s#>U1{^C0rNBY^h`d_>nJ-6ajlr07r0;10YI9fIVsr$kcKA6~ZXTq1_?!5JtI)^qL$CO0EKy}-cnVh( z8CaEJ?9dNbSvZ8_PIL62WaX+sre6L!#_qx`oR2E5XZ)qxv9iO+{+rMu3j(SpOSURF 
ze)I(fvYT*6oJYkU$MD<(X%m;n=a`^!N91%{ABXp7*?m4}S22R4NsG z&u4UOH14!ltyaUhPgu&OBuXhVnGEaKuV?4ZoowH}ow2boHf`EO#;FMhEtg)?<#L&? zeeG-9cH3=~%jMV$Pur4)SZg_|V~^_GiR#DebqPem(+&EzC)+y;Lo8VvghFbu^kfoU zjq*#+^TsmJu!cR6xu0aTlC*nT;HLTWXvQ+=dO!~iyokCt$t-*kS`;ZJAoxS*pijH@ zHA6#3_>)imf4t+J@8IT}ZYGn>rt)M=owC+)m^V>JzGll`B13u=QtboJq7gAz^95=% z1GTo0-)>uTga`pXxHgSONQo=+kY7)+ZzU7Q4h41jm800%E{v5&0Va1o zLOcOTgg=I63}1Gkq(r(eBbCGm8{=%oQ5j@mJ3@_saIr**iP58U7hLd0sd$2{axu9+ zT&3_tkzVI8!WzYs=Taz0CdNnUI_Db9_!H>rH2L0hC{K0U=QA=DKt5fksVHMBLU%ATem zpZ2)Yi(FU(~_X) zx#sZU!`yrCy_~z{TpoMuF-As$j&-iWR#!c&01?ssZ`=KpZN^m|Ni&mp6@X#CTq)Qm>VKtXr@Za z;wCUMVO*J1+=+y;TswY8?f7W}Gu5e1)Oq-(elD66qf>uTqJEPzb)J4*>0I(0A|oKq zQy_|U)pDG}6wj0p)$s5zfB(P#p3?L*x7_kmmZc8&|D>{AMa%55+atqYDaqY6M$MW7Gfx&LHK9N6BxzoU&MjkX<0(O~u+sZdC@) zI)RZ{9D4#p2|u%e?B@TDTXhk|O8^UcZ^SLX2&RgbtFQ;U@QORAI$P*H_d=#8ULw=G z4J$v6>#V^TheB5mR^KbATAS!vYf&j3C6gJ%ST2RN9y(XVsNNs}&+a9+?q|T7WT1a9 zVsesFWjb^WgM>ZP;7}zs44UJhH3x>X4T?nsb$-Sjn zctVHwveq~xU3E|@m9Z>~LZLt|o1;`N(bLnzp+kqTEQ^&ZSJKndLnf1n6^kP8JSn77 zE>{>IALGD*1N_Bb{so5)AL3o_dKaT(WAyg+V6;S>qRr3qJcfsd`Shnh&0~)}7N2`8 z^pY^Z;w*a8Bz#jh|D=$3+KUqpgSd5stDz5VYQ}+Sru-#H0A;!oq-G?TS}l7M(WB&Z zk7Mx>8)atOLF*cxBq)SbXSq6RAu?hjcEco^m}pG8bT1TLN+$37zR#Dx{0~^l;`MKM z1NlOJS!b;+Wjoc=dW7Mx#iGR9eXZ5G#ATtiAX*rD6#{G-u$9JR(%+0Va1~Wu z1Zz8VZO6k{jccpe$YVb!Wt^!N93^ypD;yX`jm`}_IXpZz&XrBa-=KjXt)g4^qP9tRH| zF5{Ym=SaqcY|X@@BEO!v>&_WT#9`TY6qb z$lSm_4EY)Yy?f&$v<7Jh;T@qvh}EFIa2YBv3KRAYe;Ph;p@ppJ4xZ7ARnPoFWmDy?z-zP_Pw-^-}}&qShaR7%2M<5 z4qN)p3RmLn>;B-n?z~p+jJEbND`jd2>}#)UFh0ge5Ei%|C@0Jr6DVP!eK(dfkCJ$a z!^Nf%d79|E5b9(E&8&RT!N2RpWa`_Wn-n!!Z|;k_b4cg;m^Am97&0_OqFyR4h&);< zq_ApxO`^2dvuUs8-HJv$sgqz7;W&ip91Dq z63_A9vavMIXdN_?XjYLD=68rN=`h&?A-2>*530VoQ>xBi8(w_zMLzn`kMcXe_d#}E zbkV#hy`^(jMVLrQg9jqQI{7o&S+6rT0|68nvo@aZ&uK?Av(~UL#qMfqr6x;m3>bv; zkTM6_51P;Ten8n9@U$C%p^Wm({n)T>l@Oa=HzuxxK}QC9DAd4A&%&q9y>AL4!CsQq z+z@dNS;Q22L8BWJ`Sl7*S{UgEUVG>)NCPGci-u(=Donm>rwd&S@{6|_1a!3!&0*Zs zijkLH(R?!g+U(2%+C6yH_iNs4lrg`3`*!ZU^G@!6-~o2*xPZ66{q5Xz(@nAGZcNg$ 
zOJ~wLj?IM^UdX0Ro7r;i7H+%kHtxLhPP)3fIClIPU--fo_~=JJMyXUHmy2lo<}Au$ zF1Bnon--{;!OKn13AH;K(_FRRlXq8Z9b4bVBrRYO(`-S3eO>)-+jsOH9XDz{Sj%%V}ENGi_?23KE`qz2_eRUX)jmWJ$CM`qM|c_c~Ft z-^`nfJpJi%Ic4lDCvVPky&3PPPHb4}%;)G+GstU`t~br-#b)nXrqt^#P=gylIBA#BhRw2P2w(YE$LKBw4sJS^E3>*|Oza&O7fsuD$lP zalLyx22|Agwpc81$t9PP%jNJrpYQzVcj)Qq<-rFZ;AMzJ zU)pq8S=s|K+8|_58;? z!)DrG+FK;J%l4Xlq|)m4G=om6d8 zrpD;FEN$!7t=#fcw{Y>r7c)9KLRVK8U;N@1saC6u439Jg+xo2WW*Ovj9esU${KQZE z1Xo;fMJ!I!@+YL6rIK+Scp5CSW|se}U$3+M%=L4roF8Kv?_uO9#6LA-J_;Q!=3Z{i>a)-3 z*kT3?!z{~n)=uJopm^7O7t81K|tVJf{~FCcJJPe)`qp~*2S8}OYP0CTD6LIyyG3b_~MHU4-Yq2u}XNnGR$_D)Eh(L zeUr6vxj@=C6I-7mkwG6PaT*h3)FSE{WrQ9ewE&3FwKzr`GBQEC7gGzt#o(%9FIv%H|T?^BVFgQcOhRz6*h$FoFUqzqy63wPx+Q zwfyG${vWcrY)W9~RG@sEgOdm`NZGnh{;crYr3|vbL)I$Nlh7e`$=ZxG z|B~#V?Wx+$Rv({Es148B_v6i37y&{fg#w|7+dL+{cv{!#H#EzdBVaBxq#f@QDWLZo zot0Ujv{_iev_bXA)*j*9J$0odGC@x!_1{nF2GoK+O?J#`_0FgrY3y;wrCPy#%4Tyo zPKKVI9#*Yd8M7Uc_u6nz+n7+rVu8ypyOb-hyn^xZaq{^*-@oVk?A`k!$B!LrndQFN z!?G+kY}mkW|MqXw)zy`@@<9A)$YVcF3R;W}eR4P@f)!@hT{IJ|mhpJ5?ZV024 znsH7fz3k}UksNT8|6vkdxDE{lDJ7P&8d(oflNwI5?jrHrWG%O(F5SR&z1S#}LWB%~ zNJ;X>hDvy^{IHxZ5>QI6-?;N!G`|uCRsaM#m1Up&F-B7;7WvpGK0$YHZ>(v(9Leb* zyZ&q~p-YP!CI2rH)S3x6%>o>x27#tFm<3WbBMSrlcCVvePbK)THyoopO;CiH#WHK2X$EL5ZcTs4bXpviKU`O;yvjY&z$ zMiVuvBM%b!R7z`pHri;*N|&p5uHewnF{)Z4R1wFPWXuFBx?QTS^=8&*ANQC+p6VU4KUV@us<9e})V0`D|hpb@oeDT{o*uqeQrJj9x{Yq<2%OS%94`*EBM_x|u+c3!lT$;kK+|MyIG{jR+J(ZsDHTVgPpOjvE_`43(=d?ll8cV6B(z4B- zlQ1q@S1XPkJHeALNiI9Lh%p}PyQX>Vg+-?P0&9Avxc$K?uDS3Wj9ro_zerTysS)-t=*L z2L?EN+-27@Lv&|`dF$)8amT&;*tg$da@xi7`a+{CjIYbYM$@ERG~Jjc$foywKTcFk zHp7X5K2btvjA7H}&HVDO{4&b2mSwI>{WzOr^0o5O`U;C+Uv)jN$un%+o7yX(TR;rH zUe?x_n3WZ#J$7KW{cmPT+nF7$*+8=yz&F>`5N(}rtJms7rR`|;FD7QS?KUS~l&zs% zv<>hzburme9c;Nmef{r-`VMBb_Z7aNUVDkJ?V7~jK%E2i1L-XULpiSOO}1bK!iI0|q2C zWompJuUz4|myYoAk#P)$-t07k-C4#)rsykVSlyc=s}!C9oIqp0=hc$rl*G1etPp%n z*c#VBtYoK6;GL7CLrGBi2|!2!f3!jM^!D((ANoDA34x`>@)k}$$e%r~eNrkTdDVUs zx<1|7JrPozmEoShm!Ha;N-C@^phMo`upiA6{ALPQw)($h3+rSC`)uKadf}_tjxkp% 
zzU^>`p}sFoQ}3w$NHV#}k_3%8B;Ez#NLlN!HlGVP1&c0OLv0e-!3{dc3C}cnJxVDY z$HB5JY};uv`P%tueSWrOvw8C-Hg4R=)YKHEQVHMp!%TZWA?28)yqBiNF$zs=*|LRS z|Mg$TvaC3nF0MgOQ1dn6iDWWct;<}}7!(bXly&novHg96A z3`P|hnRIz_UzO2P5!dyZ_Pgi^VoUbYVNr25ZPRjS8mU9#RaPo zLQt&+9S5VuA9Njb@U;$V=Z$Vk@kK^s6q7fJvbku(O^Qn?%i=fR_deEd*syFA?b-tm zJu7QTjt=NKbKyJC1iz z6$QTigO`}Bz@(Suw(mW|if+xwc!^xD$atm5ox>%@ONyTC7{?}j4jdlEu{8Tfa@dYw zY~lbF-OGR7_cC4CalG;he4XPT?l?l#^Dv^HgO8VZZ0|T;)y3%Uc%zn5VuiT|nM{VN zTMd&BH7ca>lcE+8r5}0uk^F1C5)w7{2zcAizKvJE_BE#+{jIe@o%vFBZBf6bZVU1g z>$o9)Zx&(0UuM_e?rSqq`}GvLnSm`%QuM97_I8+rSfaLts^091p!2&A?J?prCup`U zYddqF;8|eQG&>pD_(8FN$!bLE^I9yTL}p#~a@+weDw|rwEca9xH8GFOvP&;pka@F^#Tt3^*IjoV_uhLiFYeicHae)07b5JeR#z@2YGG0H z=knIK|6EXxW=^Nfs~JKGP1a1k&)<&ZXfyqLS$E9F>;Hs-_qLjaLhzFCnTQiYU~b~0&Yl#qN5TCVKlaxLYWdi89Z6Upc20W)v;57?G#Gu})len-c4&O`0a@9c?7uwI$73ql#r1Ta8LE;_^nL z5wm#DdQK#BJtb{0CISk9F@dNEB3SS$nuJZLoR;*8F!%_BP(h#Xy0VoZ4y$Uh(?tZ+ zsD9tFELIGzpx9MJYt5cLd${@L*D*0Mk>f6GEl8dP%L&)u9V3Y9dfxT))uG9WO5iIIWjC**<&cX zC61ps!HU6Nlo?^;%08gP5mm;r?6^~$CwveFB?8*MTrT5zRiqNw(vG&O^m9v5^Wj=B zH6fi$Hp>S;_`x_tvfQ-kTT+iE>ay1Uxq3o3fc4A@00OUW4Opg;A=GupRZT(&hIAE! 
zMHpU+)h>0)4AICs4QGKgR-J!fp?yPGG(qzoPlmbf!^?-(4;#e@gT^400kAh|B*Bjo;cR3=sGI1Xz1wag)nAAqq`2xE}wps#%>p>2@i?&u?hLKw)35vHE`5cZ0j zVc;$JisvP=R!zzp6~dtX3Q`3C zB$2Vse7sR8E0k?jZ6|=Vw`taT0Z1tsTro&@cQ?n59p~`j!we1%@aUtvx$%Y@uxxA5 z8`QM3;aHZ%x^?S#_q*TC7ryWXrl+P-v#vgg$0R(}AnDFP)a}hPSW3lgfQvyU7F^`% z>x3zs3|9n;FX#kU^(~&+Zv{P@g#>aT&DIk><7{H#*XEF569*n1N6Nmy05QShE9L~_ z)4_YS7~sWY1{h$h?9pBDaD9mi&(HI8u%QbLmO!F$90%9)@H{V&trZf_cvzOzw8>6< z??R1+TrS6Z-}_!xu3kMSP*^Tw{*tyj-egaX7a9PGPOc-(BE&Kc2p#;?c9({G6tt3V zE24K2{BdS=fKn$H?#DtYtjZyrJzu2jr7wb>LgS&O#9(3E3c2I=AWMfam;W*2#Vf-- zT{nox8zED8V+>>ud=c^NU*Y)2k;Va)C+`@>wL9}YijsXR!#GE7M?3wL3mcpFr)Wu4 zhaE^|mSXh`k6z4tl$+(x>Rl|*(pV`x~uy62qTti9%(-~8tHVQqw|PvOmf9)hPS_>ZGTzeX6G z!4A9|tp-}s+l$*6O9bbZo{-Z9QHjfh>k|h$r4+8?#G^!+_vQ0>wr=R+nZp*d!Wxo7 z;N=x}ZZA-93|~07Kx9;#Px*4NP#Z)fV9&aU+jitRJ!yFOP)MjPO2k!Vo3!8sdNCF- zI$(H>!{Ng+HV+UGnbc}lBtc##m94y@PhyrWFQuffub+vD@q1UOj9u5|=9_QkzBjxckS|FNR&Yr| zILRb}n`1*qjk4=g2}?}OgCAWu06DJ9P)CTdlGz1=iNrE{jHS0z(g>hYU%CaPCejFj zl9v8XK^PcjssdvKQeMRDLs)WdM5*X9Gna)3Q4E`EcG*=r$bLsDeNJ)>wV1IA#!~?lLM7R*2ac)O*?Oz>Xfcs zpg#T-o>W*9VhS73B1eQ1qlF^VKFfGYr0rUj+P`9`y4Mh{c{B61U9k51n00qk&{H%{ z{S#{Tvlv?+9n!Z+Q#n&zpMMqt<1H#x;uf8?(B+Ktp?|A6a8mNypX_JP;3xu* zld~N(q8#1%1_K=d5iVm3dFqj5psd-x{v1F410MhFPb8z0LyH1^iY~Rk!YHC}b=9wg*s2@U(eTHb} zS+Ewbd@W|(4^S&yi^{7nL_;#-uVZq zk3No&60dg$(S{$QUcQmYXbP1>l*$4+h6y@)z(0d`@{j4RKY<{?ksWj&{~y%KH(&;C z#&~5adOOamKSU5!QF?*u(SIU$_CdP(f0ssZ6v7$&4ZlU9;NTzoE$U(bcjLdulwXOk z`528(cibc8#GLb9Q$#JIku#p9$Eg=S_B#z_ac`;%Ugdv?>U3}m_e}GSX;u9P=aG>Qa zpPYm+hEl1-d*AaOI?CmjtXiA}bfho9JdRr|+A1Uw{ka+bzaO!bJeSWLDKS19@#-7a zb7X#oIV(VFdfa&mc7eL}m}euNsB`z0G2VEa;#-F!KL6A_C#O5f#Nv-?VVkWG`jhn zkBu-^9boO65d1dlF`b*vg;|F**Uw#mQ}IC>sE_6-WbDMHnUSodlc%KK3DF*;8CHbxc^LZg(2 zqDZjehj6?C=GaHk=l%s5PEgcUun~IVNf`Yy`Rjk3TF)KSp8X5Fg-6JnYK*|mA7|mv zUt%F< zU)Tt{?!*%kd;Cucj(-#xjZz{2p=eAWK#zQx+^xSy)Vq)9)Thv=|2L5pxYrFq^&H{h zzd;F&FtH&~_{^Wv*!lB#{rd>Q2q_8#&N^^F3Pb0vE@tOOP-p%cikkY+e?s)#OVu=( zDf|?D>uCmmksil%di(on5~N*#>2JLIYmcqln!Se zN9SnRh@N7^+37sL_pyVVZRE&%bF9r9LIVpCbW8972 
z59fRjpFBP80=s%G8~OxuRm*ed;H*D!lTjuygfn3g^uT!)8ntl$Sn&h_KgVe*2g#|^gEl~}MSfLh);ee9rl0*?M zrxg-Ikq*7Ry-hay)YMdygmvMB>^t*v9Ebk?KHl=yxA5_geViZ&@H`hsD(=1K9`1Yn z>)Ehj1BVYCZU*$0O)asqmW`{ZFpO|rwP=oBwgu?8cxd15TG!vMa+6wozfwYsY2MT2 z(E+>U8IUPgL&O3$%38D$Xq~w3B}u78V028z*|B2>zxa#4$noRH`Qn$p$Wu=}9itgx zi0gU$@DKkmJ9qAkJ>y@hv(ok=R;roHpO+1O;?-Sj={nA@e`bhNqoeFuuXysY<9K!> z!_GW!ytS8CZ8dawC8{RRUwz>@d~cYazE_iXDr_2Zaf^N2zq`i0`-+ru4%J$Kc6<2r z6LWNRN@OI-HOBaVeyp1#^=*9gQ;+kuSM;-U!vL;hnXSPeKX8Dp*KDU>Okz!*55B{) zJ~xW;CV0y|JNUEzJgUiLw?;o=#i&zPy8u4rl@;csr9{%XyF{a3y(m3in(KdOX1dCOvqt6Wf>=$ zBa&T2^+K!8*Q zfFpt~SH;i3ch5ZX=j} zo=$t7io20Q_aKwCA{{+9qo%)#mU+U?9rzu45Pf?==1^8PJ>`?(Xew?$t6~tko)5>?HbP1N-9NbsV=@{mjqL)7918+8nNeDzCG% zlULsLN{${q%EZJ3ue|G(yy~vI=MoR*|lpI_uY3NuIs)uB%&{H!J;Ty)j8y(&kbv0PRFO8&GGizw(#bg z^Q`mE^ThaCw)f2Qrkf-OPS$yBLh~bcMEt^=diakIlm5w z`OJZN?!UF0{^AUOb@l|eUULIdSnl7am<=5M=F6pUKFiaHo)_p4)7*({2EJ+bpAonAPTEUVbHQbpt_MZq*kp! zeh)?#=&nDBCj*QuQevE$)1RlT$Eg*zQM%#%1YLJfj>h3DKLXJ_`Pva8DHy!vC$amF z!sG~bHAH#OPtw=%3S2vjKK$=+gu%~kqO|XqiOSc~IsZ*6`#*^CkDyf-6xN_j1r?r) z8SSQlGx=2_nM3UO_xQta#kCE>(TC|U=P*{$SqN#&K7(uK;@w3WWH^J{7$FR15mpen z15nybNB3G}^%lhBSJAG*-TpSz#`~#@0V3mIY}A(ISs}?g0kzr_ls$tTn}f-x5&hk` zwt^me1aELJzPBYNEM{!Dm($o^S&wEJxPH|-^;vvV2&4!Z@FsD3&T>s}o{|%hb9~BP zh?EgXfvY6H^0v*qdaI_PV~FbZZ3P@V$G6TmkixRHFJAnGaGVeP_%>Wo;h&#q@Xuc! 
z;r4AMe&ZdR>GJA)^949E+TeA29k%yO@Kdkt<%`eEaN|~w&82e$w#?l0Io5C4OCy}4 z)6*C$I9(aw_*j+5$R^aUt;I%A35tx2&vWy(JWrpPVZNT{$!8|nzki+^Zg-ldhiz>N zr80@o(x>#wRoDW@X;ruNdY%6M{@B9sD#3B3lnf3I@ZR^n7uRu{?s+1SYlT?d%VzpY zX^AR?jL*dilz=EQ2qkc2!kQ?(T`LeUtaSWGi`*1o>6nj(Da<0M~R) zap>>@{>U8ff9o1XM$WLY-z6vN{QZ|lnTmRN^G$Kv@5$2=$IWwkCdbFW;qlH_b$R4mrYz9bH|E&=u;y^8h-RO8(Ej1W4a+Y za%6;Q-(~Fl0$aKSgQXghfW6nOC9h_gtQ9ylt~qe#AUAJV&po>ol_1a0|K35g@(68L zypboBU@Ah8^PHsjE8{UcBXOizcEe9?LWKY+R)BWJW$cP94KPBGj9kUS8c`HAz13Ua zA-=0Jp%#!z(b?I#sF{-lV`B-Th%gK>#-OAk6^lq2=8jSbN6-i(f*?Y;PIGLreJ(yj znxkcsV{BFy_F_TWv_bYw)ht01VqXgW+l%bKA|rA^Yf`{u+#rM?iX!}U5+OBqa$U-u zU38Q?5gA>7^HP>IwHLRUjoA;Dz*cv~J#6hWG?e7vXah%eqD08g zym1pY*MoF@{_blP)|47_6dH`3?MDmCJcQswu@yl+I#2ESzoxvifXxkHOn|MPK?U^~_wC*eP7hY7 z_&Et@@oLXf%=e?mzl!i@K}l-meF*E&rAMd+Qy3yB4A4-0Sfb>a6(AgBW0GRy2t`>S zIQbx|J{FfEltIz)ae_0{y>*F5j>L)1Q+eSZ@bqbv zsbCtTSa$%q?iVrkc{un#Sor1#K}fp&Gw3sinXfk}-uTNj3fnS9N7+A??4nv#CBIyB zTFtuCl(4n~tvY%3>^!&HGDRWrLkDXe#^*geA&^8IJa2er0*VS6xpBb%uff<+j*a?E=?%Cd!jVb%kpWWFaHL|WWZAZME$7D1v1Lskr$)|m%l0C>y8AgE zgb64m+AdMbs&7@);*x{2MzXZhxdZjPLuVX(8Gt^Jx> z81cuSp5pT-`?+>wnPUsvd495tQ>gQeX++&91;6zvOK*9KcYj}*KmYV;YPv5rW-_AntfskSif$Qm$$~-O ztp#C`ZL-X1vMdDDMIWVsTc}TdjiQ~xng164V@Ii|wYa)MDLRW|YE;#EJ3E^b1fd;qJfL?~qEMjE0U6`qJmcaBAxfU&RqSH#dO z$nE_ljO@VJDz2=Nvy!}~81x$W zT2V8wWlbI_BmoLvJCs$#uf6LIe&%<-N!}rzb2pa#hfedMzYj4+&|9=LG@PnKR}Ome zIfQjMJzk+s2fc+lJ%t9w3eHbXvc5wx-snXN7hx@~tn%u+w$R}k$|7WbA>hDC%`5i0 z+`V^z+Sxpv8+;x(*THN(5pKzpR%xKg8m(N3puQyG5;W)Itp(3>X*B$%%HGv#R?=FN zasT?zq34_KYN-H1Gc~d)bJXCt3J*&dnxqdeo1X193E*wadlS!CD3#2lTYJ&TgY@2| z)qmELV6mCZg*>q@M#~d`NVMr?a_*7VoIZV;t=qQ6$0}YnJ|s&Lzu20&C3n1X(j}OS z%KZB0%5*pl1W?mCjP3zzsf6A9+5;9>YK*mLyA=yObxQM7e^jKeG>7LHX8jz$^Upew zlkEZwIcE|pJ>K)-fXEhcsPo`c4T|}LG{S56)sF-?(hwO3qC z;Q1ruqI2Y!z(|iq=XJRI-p`D;1!Ekl3$rvLg|-8T+%Q@ckustdb)vTZG_^y2M2-gK z@Hmku5xLz&kxQh?$oyK2_NmUDrw~EtmJvO7Q`kI8aQg2N!4zko`A2$g`v8TlZ>F+v zl)OGqNsrJGoyHg!mgN2}rN1mDD<_!v^=aEuUa((9Nv1-}UjQd5J4y+dm{E%jMBJLM~E; 
zx*FQ^D{Vl=7{({Y6NNz0%03eznKa0Tp}}<(dC$R-Ce{MT^u1HEV^;C5$y;q!dU-^7h;-C!Q(P=K z*+jy0AuMD%^`@BwS|BizMP8dorTNvj*J%V1C&xW(X#;bU=jrI~2PaU zZyj3(8%)kLICgrL+je)*k(;7t-3>hR`~*E+CC*P9gg=ipmi~cm{^G$Yd{b;P4py?} z(21kus?ZYqZG3K4_}dg6O)B2@QYy7&Du2naj&5owK$YFMX} zaO=OL(7gkC6A<8jP>0(ee5RC%0NRsn%%=<+`7C77J;2Y1M<@@)QDS06$ zPM|V&M?p=IX_h+kYy^ShtEDT zi|ge;d4P)o7J?#QITBK+l&MdgRJhH}b7-e4gz)wlh07 z$F6JF5;ey0E9Y@!M0Zi6@&PK|!7uuX!sw!v{C8wjZk2irOZWpRl4&hkN~)bVQ|Y=3 z&=_G5BG#gpXap^t5`>~r+(D4v5zFEx&$1Q^pmIsVA*giRjOe%>Yy6lar7Lm6Eef#0 zqtg8vqMp}aghU}|V4-wNtPyDiLU`D|H&C$^q;L?{B@j6pJ#Wb*lNzf-OnwXVxlIZ3 zqESe!bP+^Y6EffXeJn^9VPi!;AvDHlqTD)W2j3Bs5tU2?f@BhyV}l!kL7H@=)CE+M zT$v;bH2A6qhF^=eHmTqZEez0?Q+&i!qFP~## z(xFy$!Ew>n;lF&c!e4%?LA7p}n$rw*k72CKOi<+aA3V#Se=cIYYB^uY^Kbs(B;|qw zc7}8H9MdzBANlYYyVkq(7b0eAg6GE-^+u7%hCDIm^3(t8G>9B$XFQ|`nT>k+zdpXe zKW@rVaNzW;;`q41bG+EFLg*NFGcvyUNmU7g$OyiEDB$^HC2qbgAG0f?sF_b?J>zZL zZgmNPeU@^YimBuh?!^4q%Ja zgO@>v7`eE~RZP@8 zMbgMI2oX}M|08PlA*9F?7Iv^O_#SNNfUF~>gVhF$ML7yeNW5Yc(^XOSKK_+mY#?>>BIGe%g9 z6<8f%t-{J2LRyF#U<69$(Y8WPPE)T;(wm!R`rL6k*1n0rbU^h0uBy@~--!`jtE<55Yy+z!<`zO4 zrWWfVr$RXCg>xKoxqMq2YgH1#tA%+p57XS-Jmcfz$vqWxc6Q>rZW~;GwQmR}k-l8a z2o<6o!jMwnIfg(R!caHq<*`6b0Y0F~7kx&{*&vv!#)^zqK$!ks zBG;_?IQaA=+Ik2Z0`2Fgkv#a+0ODywHBzhM;H+wd?kahX7e6P9k+=)JWCpo2i3i(C z52Z|WFtRzM)ir|L4pey)wec^J@Av}wxzB_3X!O1VZ~7lmwnC%#Z4`qO*tsWYXa~1r z2OTqyLO6pg--8LhiP8?|T=hKCGU^ zDefUq*WlHk#JIDBfnk6nR4X&s$lwj#jhOz&cvP=2Of~;{aaZu8OQD0fYLSO z)Q2d#-PGiDC}k;+egGj|Duui0aOVhS4-smEJ8&=c{2fU;v`7Fft8&+bn3wZxND0E1 z`KJj*E;bCoqAm1w2{x^5&{wFlYd!Rr>TF%3SvMf*EW*uO8hrolIeNPkJJyCMA=olx z*}B%@+6{t|7jV<{TUgswW7oQfb^Va@EW0)YDLxzom5nXvpXFjB_Jj3RV9-D>?8~c4a3qI?HN_3SBJtc{;l55vT z^mo~~H?k;Pr=we}AW4}eai#3>OU zXR++g*XjZ)<&bwBlo0615QY&!5E4bPcR*@vV@#a6V|WoFI7>w)nm;qCRkBR9@-IIj z&Z0k3$`|3zb~P697ZStKE2g{RiXu^q^p2Y@h}i)nyL7oVf1L3@uotkR*0D|Ka+anh zd2gj5S*SZ0vnq?TSPbVyUjOgpNS(cat?5NKIFZ8Rt2;BXsO4pAmB$I4PIT=I zEIdiQ_uUxxI=tzR(5Rk;@(z4&7nHUVFhHT_Cg#VU#rSn-%pfXj{(*kEr-A8@r~syD#z5w(URJLQcm=GK%)A&G=$9SZ 
z5k_3DL4jSyWBj`b-CxpL5tp{cLeY!pzKcSB4U@A^;uhCY9oru}%|#x*+=wqW(U6wT zl4auDX+oXH=5{bYdm3}@A)KB&X;fYy5`$Q|9qsHUG#aUfsMYHD3un+_C*_`%l43S!omTQUB79814C<-*3od~&xNKWDn+`;77NtB2v_3feFjH!#pCaMLiG*0xHvEPk25`bl45BCGqdx| zkDcPPUtHkEJBDy*U>2a)Jxyi0Mloj?IXOWLs`X)X$RiIwMHC5+p0&K{6_Owd89m*_ z^yCPSJu^ZhT*p%fj_~D2;l8^ph#t<3EwE<&G>)q|H(q1Sa2bn1BN!Q*rdafF)fkcA zXkAac{3X4Td9+w0lrb_gLcLbQT1&B5q>wK#JUske4epmh1QLKB_!w(3T9Zpy=BbNb zI!m8IMPwMVA>UfSJrffl%1UXzVG`PPiS7U39x|-Gz?~&g zj9m1YBK~7aH^+OTEZ_}p!N^YBj`awc!*_-$7B^9s4juhDlvuzqRf;mt963rGZbH}v zd^wC$s$fM9we|<7h&;tUiLlopM4hhADpq)C*-38GMyz%4MGwgM~K+E-X46kRQ<|AaCpnNAbcP(07Luo@7*7}rsx~U2%ZtlgXK8$Uk z%6rfhDQ{RuWA2|A8mKd0QPd&_F@)mO3sAY+O6Uryrdm_tXS^-2*&z_#96kuHgV@XCz}^8>iq2TuOZW z3n2?^!xIFt23Kizf2oQ z>r}x`Ce^z{8=E8n7G$C}Xj)-I2-=Ko;?K(RDNNf;yiy7w1=7Z|_^rSlLjhXrW}r%q zYqCj4Ho4AzH`y^e+kStWB+leqU(z%37diNBkY7z;lFL>STqH2G|H?|()_39&S!oNf zqD_stb-qHxCO+TwMDM}}u=;BK5?1pvaGO}}vyvZ1ahv%kD(d-%@jSAhAf{V%Kt4~`>9o~vjHuhL@S*Ut^{>z8hv$vOAsesl|+r>;( z)2?Wc&jtdmdH(t585tP?AeZwP8XBaxrzieCk;0VXs;uPO+75}YxZ^mOFp7^q3F1OZ zFmaGJCS`*rgie`C<+jigDHX=V^Ql_LL_MWqLR7lmjExlv)3%Wm5}`!mSFdm#moSRZ zIzASaOl)M=PPz#@QX-9P-sd0)P|8UpcNH>C8j{tMP7_?nK1jdZg8T|68VC88qn_WT zKMS^*k+-C%1>$8v!n_n!$QKjTznclb^3u#{%pe}Svj!_PLd26_Qn;}TyvbN*$CV-0 zhG1okf7?37XcPwFILXgqMzJv#YZ5Jm%Q$~$wNLPBNK7fqUTeKJXdwxWLMPuHSMp2G!WfOwSANs>1D_|Ke3DwN zhPBY$)lH#LV8ezD&Hk4Z#G{AW#G2+|6ea;$f>fCb+a?A;qFD(y*F);t zE+pER*lyR-2BRyPjzns7W>iY1Zg(QC#y!s^isHF_Yb`p`@vOZ~OllJJB(3t(gh5BK zNszS`*Kv?4MibHsJrzw_Zt5bf?CkeIC2z4y_k!=q2*2__NfEoy*{dZHx&q@mizihs zmJD1pjzxUwtUl| z?dMW7$Ca|lNBTLd;EJ@Jmr1YzX$3~>w%<1` ztbp#st%%fx&RSMGPxR6Y^2Rn3rGI^`uy3Y*@>feGE-oxcUlG#)0B2%%%6G}VFiBaE;J z>44D)Bw9;Aqm*36Yfh{xNpn`NrY~`kh*qs$t=BNxWM=hD(^H&GptpXrd^@=+qTZqLhq+6DWM@tp(<#>9R z5a=%}Dfu?9?ZwE$((je60B8lsR0BdGr3W_NQw%!xVTed3hzwRbv2v%8SQB8JTx_1E zBZQ7oMJW|1iNy!wUDkOx7t>8&-Ni_PYjXb8YBkxwlW`-n*cEIA_#h~-?@o^q{=ph=7 z22x7atQo@dT&}zBx@G{%>bPI+u|5cbW>6Qdi%j78lyRSWYHMv`GM#c_t@m{FEHSas 
zTDQ%~$FRO^-rHC*S0*rf@_^ZN-n~Vv(;~U|30FX=8LMy+OR=Ux|0Cx<=o|O8#nS1AIHkbeH;hm;3!oCPS7(Gps_0q|&wq zi%A&yCYIiA@-R{!l82B6j1NX3WJestmBi|JuM@&e`tTtbg%vsMWm;J|mlqwDV%{c6 zW(-iT*OT6ArkXdlEif;ah>68XK!z+QmO~s?>joy49<~r*v}v-Y5$y&5ttw6}yFk+5 zk@o7Rrly&jpQEd*>qRZzlsW$OuYZllAA6iAijYz=FfhR8&70Y}W$O~n`jChGLy9xD^;m0Rr2mAh zjDC>x3wA?fcFA#drvV3R2tooMn)KqVt>$x#c7 zurfBvkrreG#z&|E)<$4MtT70!KqUQq;$kJH<{PYO6*jA0a%rKu`bW53p0!=tw#BZO zK%>#f^r&OVysc-wMD8`)GtV;PTmMbpv+R&8qD_QETp%NB%F7~}n|rIRK$BRP-b|Gh zH_1G&S;HEwBW7o2*uQ`O6{*mU967=#ANVAdY6T$#gM)(<3I%Sx^)`yd;!AR$Gscq3 z=bF!GTE({*^Rfw>i-f%C`#wPs5=G=l001BWNkl*IF4#2I4vpK_%l-SoN^S7 z<0h4P666!)qsg5}HWQg7c2N`&h9N-^5Qbp}h01PB8Pl3YVmD36TT9zKeR{H4iOG~Z zsG}&`g?hP$`DN$iSB5@chVk8GG$2y7#&i-X+1G+FXsfuFlP3Mn1^6pNyj$rUmOG!t z|K9qqz#7p^#8Z?k4Lplg6EAeVuEaH65lT)mleCIYJVu(zuLh6`EiG2Y0S7$z!zM~r zxESqYRUT^tAZFWJB-)51)`XxW29KC+zvvt-EzSBR|CaGtxl#~^5VDyuG#ZWO=4>++ zeRgrTDdlBG!Lo)WZGT_9p=1+{%qEl7(_a~CX%Pfd$Tl`QiJ6ZG8y`0o&+{^9glPr` zYb{|AHqZCO$rF6!&pyJ?&=5O!?u>(r5UYEpk{*pfFf}>FM?d;E9DMd5K@i}19z%me ztX;c~oA&K%j?TRlU!LnG`t`PrThY7_BBpP8Io0gd2SI?*2G8?QPJB(%Ae@TbG#U9x zqL|g0*HJv<9{2NQLW&d3mP|HTaFj|mThSbkOcNArY-x&2Z6j$i@nurcXD0*NtvHw8 z2%8DvOIY!4HOSOTN}1o2FOiN}wss<8u8w3UOOS3t{njGvmAO}Ku9PdW>{FpJA+S;; zHa03AtF~c+tg2N3xZjR7*Pa~MCFrjph1(?%PZUX9uqJWfOOU{%vM16QtCLBh5ToPD zKmO!gJX&u9kgDz8H17}MqEl^K05>7ezr>(_N{Cpfso@0P9#~9V{9y1AY|;GWm|HXsq0+~&)N)JA8%ym&!6Ym zv15#ljU$9$=gyrg_Vbr3fWk0jYI2JI^(TMAm%j8xDwQh6ScZp(DV0jR`qi(Zx3Bl5 ziE*Z~2kD%A6vak0%NUfT_cp%HG&vIuzk#EjCWN1|*PGR~OqJxqw$ZmH883nKrIJnJ zT}ucjX&ngJ60k|uoiT~fjZFROK}wSZ`gndnHQi}b!&+4fBWjuFy-Z&#kI;#i_lYk# z&%_gaC1IrR_H&(K*V`uBG#gBoht@AQP_FdeIMEPsU`HTq7$@XHf^ZYjsYUSpr6io0 zFtM8Bzv%Cnq@5rY2nV5x$^5%+YGuULuL2atYSPhK6PeiV)&Xnc^L1rxg(ghgnvAqT zDjV;2kj&Lz;QlR>kT_Q=yAi3lgAg=oHL8^g`9hv3iqKIM|64Zb39)2WxUJ3>EUkzj zWBs|Pk{7gz;)q4#cx~@ZEAcQS@Pl|!B~t)W#wu)KvzOTVoOoPHH8=kt2$Ff}*llZY zaEON=ewa7ke?MRN{1jdzzn0EI%(i!tQWAzCz8_$X#mjk#AWo}- zPu1sAf?h1F6HiRUid3SRoMjU~nGj~uq)i}OV|(enml@NVMB_f4x`=)4ZGtRhVAz#R 
z4l-GIq;>3l{qhB?69K7}V&C^lH7^t6u0>gI6Vf-|#yemyv2iikgu*j>bDF4MXg0rvw6QGQf9aLKNc24< z(i*G{5i&^NbDba#NkwlIO&xwEz?vi`Dy)e|=B(BjrJ5sn!uUu)idgLqv2uWKYb7rt zyj;4htyuYj*yN`9zE8DQLrRJ7`!xJU+s4{l;!BJ?76tYtJ$TW^rf-t(GU|5e*m0X` z+2Y=RlC7q2a18s(gs2s_9#VBXxkzllQE?LB`#!Z=ol~byasK=mb91xYd+$BG{p~-* z{{8#u=;&bo{{7TyHFodbO;1k`QpSDaxN?se8#~Y8Lx^7qkL*t0r)T9+h3fJh@_POL;)tMgY=qc zLmcb_jB&wAECwl5(hde?9E=I#`FSO=xCp6ZvYys}i!l;uBdlC`yuGbYzv$=A>TcJ? zS?pO-UKmDHD-}G~B@9BsAZ8EBB#Xd~ceY ze;@1DucxP{n>+8ilYRU4we@_&OAY3<5QagEs0K*o5b3B{wWq-{o0Vs)2S+(58H?5! zZHPpwme;E2v(pw?5geOwlS`*QfFyY!aSzE(eZ+A8a=zJd3qZC$)7Vxrni(T)uDM01 zi^$A{Xk&0+&e1E?uD2KHQ+e>hUMI1%z-~fwtM7wu{n`)SqIJCt$|0twja9IEuFPx+d(gWJC^4 zGS4m$|9^XL9&1UK-*tY@IT3ex_br((U#^u|`|hr4S6Q@iRhzDsjg1ZNu{|@4HLbP= zW1EH|xW)c&hCI+d~_#W4pVHUaPy-u3TT{Tkall z&W}H0zc=ps?#sOS>K-XoukI2NH{zW0+rGc=*Na!P)=}G=3!;F4IOQI@Hq+X7iO@iC>s!ihLw^+wp#{3hsZ#NDA_ybjoc^Qm`PJqG zr*poPN~c2F`#c1`Q$tZ6DvRn1USUq;Bu|l>WC)@26y&m0mJBliUfPTftt5op) z3PI4vh8EBBvdOO%CPsK$YpGN!)N6HicXvVHbFY1l>({UI-q+vb;P8OgUw@s?e(tku zZ*OB_K^9{?oV6)$y0w<;*Ke@5cZX`V%9SfuIDLAXQ>V80@P|Ll$3FJ4v_5^JhKdY( z7)SI;y*@$LXW1digm|>ZV33#=*BD7#*rG_iJZel-<`QQjp~Xx%>$TP?m@-nnQ)m@2 zSLFetg^e|y!Vy|OTPOK?{-|v>?|_`-k(6c1o_$6K`aZGbxjkvnZiGRY;e{4>OXdZT z7lL6KHuKYq&O~J_pDso$RvDbC#r5_wXHTU;fQX=73|uo+(A7lWCAJtzJ8jGwSL#5j zthZdnWjK*c5^y&9)1o8f9Jl~&6qraLOKXiN11>-*53T(e;9J4jUep?=Ocdp-s18rX zpxMBM9kdB?qA|)wHF{G;5rPxEYJ(u`X5c+Fy>sHtUym3)973zr#P@xA-EKsV@%X>RVJ7S`=`e7A?va9clti?ai221Bouq9!+~wBIDK7Ps{{;$K2sFLq8k zh+fNrS4Bi=>(U}tbf0V+bOmkdi1r{1(VmAK{w~$E)AaQPoKz7p7{D`zAPAxkA;zcf z^J8JOw#kG}PSR{P(Mr?pb!j%6srr|-HkUAwayj%o7rj!$ob{^U80Q>-C_$qs=dUN% z*PpPXg)!Syq8t_mA^o6_O@&cR;W%0wJm16gG|gs{Zp=aMc^>t8op!sO8jy46&ZiWo zPFzV;6GyLcFSqY|T)KFP@A=sG@X|{!QK?iHA{eo3!$E{fjSYeFRmx+Z)Xwu-^HGLR zrM>S;d58q5NQ~$CXk);U>u=9E(~Hq~bi9OiHtFgRih9k~xfIZnZC*tOSjCZo%e7W! z91K!$7={*MEQoVbBKA>ZG`e2DPgC~8b<<^`{vE27kFk4Yhi42eRf|1M$vvpS9H6C^z`Dqd?pP%^oxg>EdFhPr? 
zybeRZ;RPhG%; zWD*vWVn2E;pJtKLE)0f9T&%MgF*7?-Ww??2-?Njx#0E`dsz@=vcPpS~?_l2kIX2$@ zNsQ|tu7=YafQLvQ?Fpw3{vEyMN9mou9AmW<4xKA`MYF;|RQpy28}_K){6+NZ|A=aT z7i-R7y;Jy`k3i$$2)sswR!6H`Qa_t-%C)HNe*rJ-5!TKk%1jVX%J!hdzzYsv4sh+a z37n;Gs?_TZdM?C2_$<}W{f`*eMlSuA*vJ12p>9Nhh;(sp{5BBFb}e%3-oR5#aJr$R7lu*;vpl>C4Z&wWM>Zov6eebECL5Vpx8`6zX6 zu{Pl^*96X{8_6lpP3V&^cV~p^jZaa1 z`=`+Eke)e9|It59^UNz)Q)7Mqck!?OTRK-hLAdp;ncjrNY<6+d$Eg@prZcC*r;EM* zA8GX8qgj0%{SAMWo11@viteEZkp$xsT2LVvk3@PLWg;Gnut}x!F4oiu)}KI>AD3d| z6KfNbx=Csi5@#o>w|j`v^x43F@=vpO`tv{gB{5+!S^wu6Fbctq- z!|E07#<4jE`OW?f8LPef%Ac}BCGwWcJg`6L=M#$dEb z!GYnLTP0pyFS7Y4j2*Yr(`8rv^4w5VuDc@Jh()H`z&tTtu z72`Uz{D*NL_@CI{{tm1ttgaF?9!H=3FHpLM#$uF0OCNvt*V(-K&++$uFUo#uP5<0? zad72F3D=%rtNH7wcm6g0-lq|5sJT1%@BB2j`f0jP{dGd?vw7`*r}o~zL^t0?8xOhi zZL|+>QSE-2*7}E#Xa5TB@SUh1-GuZvpQqCN3TvO)lT{4R~J z{~!3fzX7%d{yP1$AK~D!zeK2%a>|XpSg0*j9{pydQ zb&GK4N9eA91W`U%H(WZ%2T}Pl#lSQ27B#QZdc4Xu)n2bp5cKJGy9MBrR&K;Rl)+ej zD3Vu$bTw zVVZs`s1W8sb7bv25yM@?V&}x>97P>x3Z(FTMW$?*sux1qIbm($(7E)Qy@Ae%1or{Z z@AU?;r%P|L_*G08k%SrPB73hRM$k`Mw!i#0G4K5f${wJVMZy64wSP%fIh5)l z^=~AghTi{bYNWK^p!(Y1#T`l_ou23;!T?~9QI-SsoEN#-}7!j{X%0QPX$LRu}eMa>m1wo=b8(!YfC3Lr$B&~_h z7sl7%Qa$=et~ftG2g+S8TZBOnL{7;vf-A>4A5%IeI_BG6jqxJ_BhQ~gkp)Wn72`xT z8<$S7hq8P0)hYb#r)hb0T<2ZDizD8BLwf-S`N&!CN0#u?%M}{|2 zGP#ZJSdDxF2T1A~Df(FJsJ}2r{TW@XL|_6+C~R|u^t8PqZ{!Q<)d5cX65}5!dOkN!7!_B!&pzlXB>D6P>S{Hyd% zKTqFk>NkE9b>~e&eFoFGg4hVaqe`)rhxTVs@TJEW4!$1ZiuL!4eY4S*=DG{8Z(2kfqKCN{M znoL>qa+*hKfaZ2Edti(z2-;+b`FveJ(HaQQakqr;dxW7)rEx2jN=mR(*8iMbn=ytP zH*T=JZDcVwfs86K$=C#hil52o7P{3)_JAM^V%g)Pt@DChEk}@=ofF=ZD0q6ZPv_QW zsa$-UZuLCs13wn6(f{;i)FE`ev(%NKgI#cz(68e+9wIb5=;hi`&?Ng~X=z0xY z*hNd1p4mokJ&L7{e zp0iYR8`rsk*gm21X|qAMXAq}2RF6lJuctpiLusUY3nd{PbB?w32Qd<0JGW5;Aa%mV zV{}3fqh0(QG)gQr`3hC_^@z8=^HuEOtMsZLq<`u~>TmowYhU{bwy*pV`uqQm;N9C) zAO5e6Z4smCl?pYtd{qv2p99j&%;wtc7GXz_i6ErVkbu z&!;p6pK!$Mw7d}+eTPUG3DW3nZsapRNn}6h=p`RXTl11qO%4c{H~{%td_uoj@ik!> zb&_nu*((mnC;4DFPi%R1nN}{d}tLia@t)}wM&vIe=N$j=X#vOhUg^%?% 
zsnpJ)dV5seK8`BE`t#7fh9`Tdcm54)=RQKW@eJrF%2&#;^}wTSUHVq&2eAJJ?0gIR zYcF7=hYk19N}zHETiHQLm)^lu)&S*ttaYwaIeeA<&5shQ8aRh`UF@x2CTx8w*0pi; z)q|LGKZNt2=J4PR%-gThdF%gSU2d@3WT5Qzv6lwY9Yac;Bh|%DgD2e1#x+Rg~ zY7CE7vz7L=hoT~6tIKrQ#Pkp^YMhxHu<<5W|CW zNaJ}?c_wi#B*v8(&qHaI$`@N}BL%&JgmUaY#$6Zt`+G+QT-|d=K#d3D6(adq@eh&; zg-%_n1e&vmp;=S~}yYdMB)^l`k{RgVSb;39Q72$ioLN&aFCjp0*C(uv*C_R6NHT!kM-T~Fe zdRy4axd;%g8o135vDW+zvdKIw&Ywk9?t!>P+A7!iiCB!zPSzA9x z;GMxb50r-!i=t2X)~nd7{~q(LA0jmMc;0JV#L~Jw|Qw68k|9&#$KP*ec=YP^#238+=OMqzRFTxeOZ1f>D)35KzG7 zT8vXa#p)pgKI*@WK-G_)tP#a*qGF`d`nYwLAPgd-A3G~9 zCeBH76Kae$_8_I7%({<^aI$3BsaodIY&HunIKTPw>K62+ZDz=I51i{eu&Lh3WpL5uF!e;F0*LeNB+I{OTghlv5z5u~YyLr43GL!qXokG~bWd0Agr2%A!TlGFRY%A0~Wglh$wlMb@?)wWmKp ztM(*V$x8CM#3#9z8KFW4;Ek)9<)mIl8hH`H;qbJ^D2;OwAAAr7QA86{kK%GNfKCdI z!zgdONW`Otn4(mH+ny0LqQfEtBE_ga{T(hJ*+mhh4Q*;TjMiyib;5fd(W;u51Kmsx z2Ids}xv$gp67|Vz9nmG}LJFLij3NOvX3kTBR*!1ilF7Qpb50eUfm~!M6E9d3S*%|HCN zP~vdvEPaVYZ@TNxvG&rBv+k~=ou#EO5%{N3c=3Lt(YAvQ+c*?Te87ts^-QF1paCO& z)}H-AthY(4dOq$Dik#wMN3WQ59yP{B^2t{R5yHtvo2I*Qpf#0>PrbfI7>0PBpVC?e zvC3E?(-}q1jPyJTmpobJc8&?15Sm01fzA!|}}2g@$kQKe}frl}uJZ)(Kr=HlPu=;|sR?_rE{ z7~93L8+F2{ZJc;mv&Iemhtd?bQjV_L0^=~!M%g`D`ZUeTne5rS9{b+IQR>?W+8&^# ziKqr`&e8JD#`PnE4lAGnJh_Q=hNikm;H~2@@iPTcHCkMvWgbfBMsyDY#3>Mq#zkze zExEthz8v4K+~1PYQ(IdmcI zW|RHHLpq%fK@d^dI_F~H7L|&TB)!Fyk~bugc8)%D1d@L6{j$$w0yo77EJ8n&y3xEb}l;N>!=l>zB7-q!ADS-t#h$ ziLP>@3}@SF+wxm1Sv)&OyeMn9O-+BV=p>k}GP~ z$K3ic>s^$nihiRQiJby9`k}_J^}4;2s6RB2inh7+ZFBddgfM$SlU7aPGAt^RpY}Le zaw#L|l5~B9^YzCMwBH`JGLfd_2k;S-c<|FYF*#{wEZVhm(9M+TBx|%wa5w{^Xi8)t zQmvvz(Fk87xb}Z@s(y!qeihFBAblPMZE+YheZ(o0@48ILRy%$0Z~PdpYst-m|l6qt$@2tfV zvjCp(Q7i|l9PyPZM8e76izrR0MW_DmLZ<7;7J4u$U-0^$jNMXsO7-}UC=yOzS2CPM zk%GNXL;NhO8X@OJ9NP78YKwm5EMfCKHtqk<)~&xoP`gOidyw`1|BI?T$lm?~Y@TV* z?(E{*>v;Yq&E5ZvGw!zuone3D53||)Ej;%n@E&HDCur2}(AQ7lw|*1V`V!SkFB9DO z6|8=U{^s-as^0?&0c}*+1~O zuy&n}K2Om35*1G4nF~~Rf02g#0=6^ zpN#L7jrYzt_@PK?JF2we1v&og#K4Ob`ML|d++-?Y$z~X_e)()|ZBwh=q}6KEYPD0O 
zaTtbC1RvU15Fn3oR;5mSlKRPezayjhJRwe=F)lw@avpTv=Sb_SgF-kaisC)vp-qIS z*2c&44b`eoqtRe{dz;IbFSEYBPA7Jj@(xDAPoJ=^4N>pr`}uVS_Dc-ZxG15pP?mf{ z%Y)}4c`1j@>BcAZ>jXfT+$(2YxtdjY-A4-jNe4ao@Zb@#u#ZqXpxc0w;g|T-I9J?KbxfCET2)`Ot>B?29VK*<~Q! zg@y9!uH2F%c)qDMoVg}HVAy56&7WwGMd)n)7_I$ZWux~+`qdY3?g9MnXKC9$-p+TT-+L9iCfwQh4$j=U z%HioxvVZ+QvF-2D)2DFW7GAwavwwz~dylsHAS&>X&>&uggSBtR>GK%xB31n=?ZYp# zzJ3XIuhL$>i0DnMvGf~1id{R8zyG(neb7Rm`)<0rj#u+IXm7KA`nzb|>a*Fp3c)5e zY_WUpM_B9s6V`)w+4mozD>g#zV?o(WZ7xq?o1?Uq5s%)>%Rol=a~QakkLuHOriv(D zlPJAft@6ktkFc?^&RcK2&5aw^+1uOW_U+pQK~RW>qD?)~{2m>_4vLtRP6t&?w=!f% z8H$<*-_O6MW6D}jd-%Se{###P=hW6IcFyjwv$Mmwv*)-Fi_=spmB=yBdgubn4SWf8 z zY*lRBrL;VhWO)!W@tc(QK9k5DuFk{`ofB^D-dYZH{S*d#HqQENmby5Tm$bji#I&<4 z1Q#*VBlMpokOrbJ)2lsAd;MuT`&aR#OStyOQF4yv?rZp4f1JK|9{f}6 z`hS#A9>V%v_Pq`IZWG(vqi-A3Y?V9u2TPsZxQT&0dMCydg5^Y8N}6SR4;KT=TP2LsH$ae?``zjY4-eWy7fNB+l1;jgYS^= z99>@KP(MO9d=K4kV>T-Y7Q~`N$7RQXpd~L*Qx?g4o;Srj9+vA&jC6j(Jay_c7h~Dg zZnqmN&;{)6?Q`qaO>W-2#m$>HxpU_>dwYBIdcBw}EG(1&5+@){jm9*ll<%3vs9342 zmacUqMv(l!(J9+lQfoEFQ1L5xzE8DUVRLhnQ(LDvd+sdT+uLk!pW)1zZ8kPGs8(wj zqjNP^Nu#(lrL0D$>IaR}sGk3mHahK2w1LFE(w#C*Xifwkt*xGB7{mEy)5~mJK%B^n(aU&3A2vo$J*YEdE&V$Zn z%p||#Cs+z4jH^Now(B0HJT$o`Kh1IURHkvtP&o#a#b^YT?VV{!W5x47J}YE)Fq^07 z={0Jn9w!uqS3es^`86CL#5!o~yg)}RmF>p?M=0xPe7v;>u~gB@LloFE--}giG|qgG zE`7kD8zEXO0ad)(v!L6AdIND_*IvY{KM$gDY70j|W&26G;$t>1;;lbVsB3se2;2tR z820TM{L|k;Uu|MGo!@?zg$=*Ln*9oE<_`P49;_Q; z_J@L^C4;H@5%eH2a>AF zY;PyNuJ;uAmkiA-@48OXS}g5XRjMV6D=j5A)UG*ZrwVF3tL{8oLWuAfC1TDIRk(5jIVj1(9nv)`wcrqk)Lc}kY<$BbjTD+N22 z%;hMs3?9074hHk6VNuVygv5#TgqLR89(ws$gH211TNT$E&>(rmC| zX5d}gLiax6`Ce33XxxN6$~nA58~Yd%fFt_EM{g>lji8!oa~i$$i{6ivq-v*+#+QMl zX88@gbBptJ45ar@8*A z(e-!VrygVEbM0?OH}Yl2nVbO__5DT6nQ5c&L4El}Uc=om^4)GXM{qP4Q4NJg1s*gt z)x(+0+Bn7)UBK)upMWYcbR|(_0xaxyd&`Tr8e``Nqxng5xg5}_j&IjdlRl#tbDlY- zM^fG!UxT0B;2(*%K4ET?;f&Ox4gDev>msmA(`2LWMkJqKOGmzECD-CDW>FnIqAWYc zqEgX|W4Bsxq!&|5&30V5uTo*w+7##*@hapNM#+Q}H_*mYVk8;hNf?#l%<(Zx|VxO>@AqV>JItA)C6 
zi6d3Kr_*!CFD8o54E$iCewL_NaxJOH8Bs6AI=NTBDbM>Mqj4z59Le7US9#(C$c*Z} zO7#qxdYSHA_e1@Ca8xQ4UViyy9((Muw3{N|Suwb|g<(ji)1lYv(dl$z3gWCK((^nT zjYcHnnzTulc-F=keBa0OBEEwJeVU_KlJdTy1JKkgA-ZxhRqih%G4$Qqtn{P;xuKP z*y?Ce?s4p&^Bi-U@62;tSyZ)I9DQrgz}J}~k*k{UVg zwJS@{BflC;WZF|&@PXO#K26sT zM%mLb3KgkH)m>QRv!82NM}36}SdGOS`f&zF9djM*uGY%$QQ%QlgP+x4FT&S~`05oZ zk$jHR!O1+3fs%RBxOC(#XG|2Th?pbCfO4+i=u zUh3p$CR0QU(<}bW8da|!CcHuR&NaY^ZoOAWj%qK$6)O*Tn+ zTj#*JNE$k+(?j(CGOSfP7EhGlHSkcVv8R zeFeB%jrX&mM;#BU|5#B4GQ8d=)#a!DjSISPYb{D#X;30AqJj2&f3)FCXmKY+^Ml`+ zCUD7@JIVr{vbMvc8fiIx%Qz3%dM+dXe%d4!8zBoaL0o2DU|m@qt_+F4OvZlYc${zp zo1+|EuCVy{H_35xebOBF`6DiC zkxFnS-e@>HGqiZ$wl+^z$ZtZc*~$i*gE&n{9Q>+*onf z&-2LS?)Ie1Ft26qk6812d3~eRI8iqKWFg&;J<^|&RC5_`=dl=iCS+Tu1v?F<;|J60 ziz+KEq^0f%UlTdfXGCPV81L?)Mw`U zJtOMVN2;YS8ofT4USH7h@0NX>NA7ys_q}CaAj|J?4A+ex3=s0K>Y9vRC82j^<}s6P*0? z{ye|&ZbtdD9%Io__}Gr10fRhgZrx46uDcm1O_QFV_OtoxUK#lXE`Z~hJS%5lHST+d z;Dp!lhmEyN$GN`<1CJc}7-PRTu02g<&o_Ntl|B7IC!y?}(Z6XLmtyF5szuG*W125B z%!6s4rQDzB`g`YjO(*?&{B3J@^IWU38Y@G;00S?xerLAtPjj4wWlwVLX`?vjQ=?VN zA)grj-1keqSFp`8!YB?X&8Z_eP*dOGyUb-TGhN;I<0sMnDw02Xf~E967nX!N&U59? 
zv@-syu`=}cY71C^fuG(U$dswtc_R1Htz@7VeRK?7^1kFub=mg^TI5CsSOe)1858v^ zY%4qtFyox(kZF+iCC58)z`I4Iw~s6XThc}|WSSRb{6}#Hty2A~G0&Pg&N}2tGW1hX z(as<^BMaE{*wQk$sVOtqgEC$(YZKhnkmR>^`z!oKzZyp{`o}eYPJ*E?ZIvr|f4X%(MF!wrw9t*~YtMn@rYUVIrN(9t)PjSp`)%(# z))u;Dik|Q1g?=~DovR3aHST*y;yyO?qlBgAH89I=k0xnl%w(*m;F+HXUBAq#aU<1O zWxnSy@*Kz7LN`)o==~ezmu+hUb8cJ@x(Yo#(E5etsI9@9%8^-qK$A?r*GEJ-VNa0N$i6yji8TIN1t538>KHl?QsX& zcGSw^uf~!a;bk`TkyM2g)={~v`fMt4#}g?o)*y}MnFkrTCzFis*mQr%c@`3>--o5j z+4|(ibygIQvs5+tWluwAH}-hwY{#Vm=eaj4>e1C$h!H;Kd9>6KeSz)m_ydW#f~VuS zZ({{zrZn`q-!V4&pT>Y6R|a571Il?nSLP_JfOqqnW4XId$*2dss?V>+NpU8o*}caa zjAM?b?|N{!2rAU09AuKAXG)QpNlrmbqBE8Gp7OJc$}S)0{<`RSIw}*eGV(`!NcY;v zj+-dQd2K7_;(cU*r$I9xQ_1dLQol}WEfger?T7%X<3!FMIquD|+bvS0rqn{IsVBu< z%TnGelzy4Po|pf?yIb_Ct9GvCasXCiH5MC*zYo=C<$d&{XH3x#+3rb(S*ht$C2EB> bnq~ZF&AFBHD2m6l00000NkvXXu0mjf#~lmY literal 0 HcmV?d00001 diff --git a/vendor/github.com/brianvoe/gofakeit/v7/minecraft.go b/vendor/github.com/brianvoe/gofakeit/v7/minecraft.go index 5e5efa20..f15daa87 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/minecraft.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/minecraft.go @@ -171,6 +171,8 @@ func addMinecraftLookup() { Description: "Naturally occurring minerals found in the game Minecraft, used for crafting purposes", Example: "coal", Output: "string", + Aliases: []string{"resource block", "crafting ore", "mining material", "mineral node", "in-game ore"}, + Keywords: []string{"naturally", "occurring", "coal", "iron", "gold", "diamond", "lapis", "emerald", "redstone"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftOre(f), nil }, @@ -182,6 +184,8 @@ func addMinecraftLookup() { Description: "Natural resource in Minecraft, used for crafting various items and building structures", Example: "oak", Output: "string", + Aliases: []string{"tree log", "wood block", "timber type", "crafting wood", "building wood"}, + Keywords: []string{"natural", "resource", "oak", "birch", "jungle", "spruce", "mangrove", 
"planks"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftWood(f), nil }, @@ -193,6 +197,8 @@ func addMinecraftLookup() { Description: "Classification system for armor sets in Minecraft, indicating their effectiveness and protection level", Example: "iron", Output: "string", + Aliases: []string{"armor level", "armor rank", "armor category", "tier type", "defense tier"}, + Keywords: []string{"classification", "iron", "diamond", "netherite", "leather", "chainmail", "gold", "effectiveness", "defense"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftArmorTier(f), nil }, @@ -204,6 +210,8 @@ func addMinecraftLookup() { Description: "Component of an armor set in Minecraft, such as a helmet, chestplate, leggings, or boots", Example: "helmet", Output: "string", + Aliases: []string{"armor piece", "armor gear", "armor equipment", "armor slot", "protective item"}, + Keywords: []string{"helmet", "chestplate", "leggings", "boots", "component", "set", "gear"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftArmorPart(f), nil }, @@ -215,6 +223,8 @@ func addMinecraftLookup() { Description: "Tools and items used in Minecraft for combat and defeating hostile mobs", Example: "bow", Output: "string", + Aliases: []string{"combat item", "fighting tool", "attack weapon", "battle gear", "mob killer"}, + Keywords: []string{"bow", "sword", "axe", "trident", "crossbow", "used", "combat", "damage"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftWeapon(f), nil }, @@ -226,6 +236,8 @@ func addMinecraftLookup() { Description: "Items in Minecraft designed for specific tasks, including mining, digging, and building", Example: "shovel", Output: "string", + Aliases: []string{"utility tool", "crafting tool", "gathering tool", "work tool", "task tool"}, + Keywords: []string{"pickaxe", "axe", "hoe", "shovel", "fishing-rod", "tasks", "mining", "digging"}, Generate: 
func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftTool(f), nil }, @@ -237,6 +249,8 @@ func addMinecraftLookup() { Description: "Items used to change the color of various in-game objects", Example: "white", Output: "string", + Aliases: []string{"color dye", "pigment item", "colorant", "dye material", "color change"}, + Keywords: []string{"red", "blue", "green", "yellow", "white", "wool", "coloring", "sheep"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftDye(f), nil }, @@ -248,6 +262,8 @@ func addMinecraftLookup() { Description: "Consumable items in Minecraft that provide nourishment to the player character", Example: "apple", Output: "string", + Aliases: []string{"edible item", "consumable block", "nourishment item", "hunger food", "survival food"}, + Keywords: []string{"apple", "bread", "meat", "carrot", "potato", "steak", "restore", "health", "hunger"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftFood(f), nil }, @@ -259,6 +275,8 @@ func addMinecraftLookup() { Description: "Non-hostile creatures in Minecraft, often used for resources and farming", Example: "chicken", Output: "string", + Aliases: []string{"farm animal", "passive mob", "resource creature", "livestock", "tameable mob"}, + Keywords: []string{"cow", "pig", "sheep", "chicken", "horse", "llama", "resources", "farming"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftAnimal(f), nil }, @@ -270,6 +288,8 @@ func addMinecraftLookup() { Description: "The profession or occupation assigned to a villager character in the game", Example: "farmer", Output: "string", + Aliases: []string{"villager profession", "npc job", "trade role", "occupation type", "work class"}, + Keywords: []string{"farmer", "librarian", "cleric", "armorer", "fletcher", "smith", "trading"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftVillagerJob(f), nil }, @@ -281,6 +301,8 @@ 
func addMinecraftLookup() { Description: "Designated area or structure in Minecraft where villagers perform their job-related tasks and trading", Example: "furnace", Output: "string", + Aliases: []string{"workstation block", "villager station", "profession station", "trade station", "job block"}, + Keywords: []string{"furnace", "grindstone", "lectern", "brewing", "stand", "smithing", "table", "trading", "block"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftVillagerStation(f), nil }, @@ -292,6 +314,8 @@ func addMinecraftLookup() { Description: "Measure of a villager's experience and proficiency in their assigned job or profession", Example: "master", Output: "string", + Aliases: []string{"villager rank", "experience tier", "profession level", "npc level", "skill grade"}, + Keywords: []string{"novice", "apprentice", "journeyman", "expert", "master", "progression"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftVillagerLevel(f), nil }, @@ -303,6 +327,8 @@ func addMinecraftLookup() { Description: "Non-aggressive creatures in the game that do not attack players", Example: "cow", Output: "string", + Aliases: []string{"peaceful mob", "friendly creature", "safe entity", "passive entity", "non-hostile mob"}, + Keywords: []string{"cow", "sheep", "chicken", "villager", "bat", "neutral", "farm"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftMobPassive(f), nil }, @@ -314,6 +340,8 @@ func addMinecraftLookup() { Description: "Creature in the game that only becomes hostile if provoked, typically defending itself when attacked", Example: "bee", Output: "string", + Aliases: []string{"conditional mob", "provokable creature", "neutral mob", "reactive entity", "self-defense mob"}, + Keywords: []string{"bee", "wolf", "enderman", "goat", "attack", "provoked", "defending"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftMobNeutral(f), nil }, @@ 
-325,6 +353,8 @@ func addMinecraftLookup() { Description: "Aggressive creatures in the game that actively attack players when encountered", Example: "spider", Output: "string", + Aliases: []string{"enemy mob", "aggressive entity", "dangerous creature", "threat mob", "monster mob"}, + Keywords: []string{"spider", "zombie", "skeleton", "creeper", "witch", "attack", "players"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftMobHostile(f), nil }, @@ -336,6 +366,8 @@ func addMinecraftLookup() { Description: "Powerful hostile creature in the game, often found in challenging dungeons or structures", Example: "ender dragon", Output: "string", + Aliases: []string{"boss mob", "elite mob", "endgame creature", "raid boss", "legendary mob"}, + Keywords: []string{"ender", "dragon", "wither", "warden", "powerful", "challenging", "structure", "hostile"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftMobBoss(f), nil }, @@ -347,6 +379,8 @@ func addMinecraftLookup() { Description: "Distinctive environmental regions in the game, characterized by unique terrain, vegetation, and weather", Example: "forest", Output: "string", + Aliases: []string{"environment zone", "terrain type", "climate region", "biome area", "ecological zone"}, + Keywords: []string{"forest", "plains", "jungle", "desert", "swamp", "tundra", "savanna"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return minecraftBiome(f), nil }, @@ -358,6 +392,8 @@ func addMinecraftLookup() { Description: "Atmospheric conditions in the game that include rain, thunderstorms, and clear skies, affecting gameplay and ambiance", Example: "rain", Output: "string", + Aliases: []string{"climate condition", "weather effect", "game atmosphere", "sky state", "environmental condition"}, + Keywords: []string{"rain", "clear", "thunderstorm", "snow", "atmospheric", "storm", "lightning"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return 
minecraftWeather(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/misc.go b/vendor/github.com/brianvoe/gofakeit/v7/misc.go index b40ba1ef..54fa1ace 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/misc.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/misc.go @@ -1,7 +1,6 @@ package gofakeit import ( - "encoding/hex" "reflect" "github.com/brianvoe/gofakeit/v7/data" @@ -15,43 +14,6 @@ func (f *Faker) Bool() bool { return boolFunc(f) } func boolFunc(f *Faker) bool { return randIntRange(f, 0, 1) == 1 } -// UUID (version 4) will generate a random unique identifier based upon random numbers -// Format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -func UUID() string { return uuid(GlobalFaker) } - -// UUID (version 4) will generate a random unique identifier based upon random numbers -// Format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 8-4-4-4-12 -func (f *Faker) UUID() string { return uuid(f) } - -func uuid(f *Faker) string { - version := byte(4) - uuid := make([]byte, 16) - - // Read 16 random bytes - for i := 0; i < 16; i++ { - uuid[i] = byte(f.IntN(256)) - } - - // Set version - uuid[6] = (uuid[6] & 0x0f) | (version << 4) - - // Set variant - uuid[8] = (uuid[8] & 0xbf) | 0x80 - - buf := make([]byte, 36) - hex.Encode(buf[0:8], uuid[0:4]) - buf[8] = dash - hex.Encode(buf[9:13], uuid[4:6]) - buf[13] = dash - hex.Encode(buf[14:18], uuid[6:8]) - buf[18] = dash - hex.Encode(buf[19:23], uuid[8:10]) - buf[23] = dash - hex.Encode(buf[24:], uuid[10:]) - - return string(buf) -} - // ShuffleAnySlice takes in a slice and outputs it in a random order func ShuffleAnySlice(v any) { shuffleAnySlice(GlobalFaker, v) } @@ -129,23 +91,14 @@ func Categories() map[string][]string { } func addMiscLookup() { - AddFuncLookup("uuid", Info{ - Display: "UUID", - Category: "misc", - Description: "128-bit identifier used to uniquely identify objects or entities in computer systems", - Example: "590c1440-9888-45b0-bd51-a817ee07c3f2", - Output: "string", - Generate: func(f *Faker, m *MapParams, 
info *Info) (any, error) { - return uuid(f), nil - }, - }) - AddFuncLookup("bool", Info{ Display: "Boolean", Category: "misc", Description: "Data type that represents one of two possible values, typically true or false", Example: "true", Output: "bool", + Aliases: []string{"boolean", "true", "false", "logic", "binary"}, + Keywords: []string{"bool", "data", "type", "represents", "values", "typically", "two", "possible"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return boolFunc(f), nil }, @@ -157,6 +110,8 @@ func addMiscLookup() { Description: "Decision-making method involving the tossing of a coin to determine outcomes", Example: "Tails", Output: "string", + Aliases: []string{"coin", "flip", "heads", "tails", "decision", "random"}, + Keywords: []string{"decision-making", "method", "tossing", "determine", "outcomes", "chance", "probability"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return flipACoin(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/movie.go b/vendor/github.com/brianvoe/gofakeit/v7/movie.go index 27199cd6..67d7112c 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/movie.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/movie.go @@ -39,6 +39,8 @@ func addMovieLookup() { }`, Output: "map[string]string", ContentType: "application/json", + Aliases: []string{"cinema", "picture", "story", "entertainment", "motion"}, + Keywords: []string{"film", "moving", "sound", "pictures", "told", "through", "psycho", "mystery"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return movie(f), nil }, @@ -50,6 +52,18 @@ func addMovieLookup() { Description: "Title or name of a specific film used for identification and reference", Example: "The Matrix", Output: "string", + Aliases: []string{ + "movie title", + "film title", + "film name", + "motion picture title", + "cinema title", + }, + Keywords: []string{ + "movie", "film", "title", "name", "cinema", + "motionpicture", "blockbuster", 
"feature", "picture", + "hollywood", "bollywood", "screenplay", "screen", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return movieName(f), nil }, @@ -61,6 +75,19 @@ func addMovieLookup() { Description: "Category that classifies movies based on common themes, styles, and storytelling approaches", Example: "Action", Output: "string", + Aliases: []string{ + "film genre", + "movie category", + "film type", + "cinema genre", + "movie classification", + }, + Keywords: []string{ + "category", "type", "classification", + "movie", "film", "cinema", "style", "theme", + "drama", "comedy", "horror", "thriller", "romance", + "documentary", "animation", "sci-fi", "fantasy", "action", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return movieGenre(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/number.go b/vendor/github.com/brianvoe/gofakeit/v7/number.go index e4cdfb9c..d98581e6 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/number.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/number.go @@ -331,9 +331,20 @@ func addNumberLookup() { AddFuncLookup("number", Info{ Display: "Number", Category: "number", - Description: "Mathematical concept used for counting, measuring, and expressing quantities or values", + Description: "Integer used for counting or measuring, with optional bounds", Example: "14866", Output: "int", + Aliases: []string{ + "integer value", + "whole-number output", + "bounded result", + "range-limited value", + "discrete quantity", + }, + Keywords: []string{ + "integer", "int", "random", + "min", "max", "range", "bounded", "between", "inclusive", + }, Params: []Param{ {Field: "min", Display: "Min", Type: "int", Default: "-2147483648", Description: "Minimum integer value"}, {Field: "max", Display: "Max", Type: "int", Default: "2147483647", Description: "Maximum integer value"}, @@ -343,12 +354,10 @@ func addNumberLookup() { if err != nil { return nil, err } - max, err := info.GetInt(m, "max") if 
err != nil { return nil, err } - return number(f, min, max), nil }, }) @@ -356,29 +365,48 @@ func addNumberLookup() { AddFuncLookup("uint", Info{ Display: "Uint", Category: "number", - Description: "Unsigned integer", + Description: "Unsigned integer (nonnegative whole number)", Example: "14866", Output: "uint", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return uintFunc(f), nil + Aliases: []string{ + "nonnegative value", + "natural-count type", + "unsigned whole", + "zero-or-greater", + "cardinal quantity", }, + Keywords: []string{ + "unsigned", "integer", "nonnegative", + "natural", "zero", "positive", "whole", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return uintFunc(f), nil }, }) AddFuncLookup("uintn", Info{ Display: "UintN", Category: "number", - Description: "Unsigned integer between 0 and n", + Description: "Unsigned integer between 0 (inclusive) and n (exclusive)", Example: "32783", Output: "uint", + Aliases: []string{ + "upper-bounded uint", + "cap-limited unsigned", + "zero-to-n minus one", + "exclusive-maximum uint", + "limited-range unsigned", + }, + Keywords: []string{ + "unsigned", "range", "upper", + "limit", "bound", "cap", "max", "exclusive", + }, Params: []Param{ - {Field: "n", Display: "N", Type: "uint", Default: "4294967295", Description: "Maximum uint value"}, + {Field: "n", Display: "N", Type: "uint", Default: "4294967295", Description: "Maximum uint value (exclusive)"}, }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { n, err := info.GetUint(m, "n") if err != nil { return nil, err } - return uintNFunc(f, n), nil }, }) @@ -386,53 +414,89 @@ func addNumberLookup() { AddFuncLookup("uint8", Info{ Display: "Uint8", Category: "number", - Description: "Unsigned 8-bit integer, capable of representing values from 0 to 255", + Description: "Unsigned 8-bit integer, range 0–255", Example: "152", Output: "uint8", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { 
- return uint8Func(f), nil + Aliases: []string{ + "byte-sized unsigned", + "octet quantity", + "small-range unsigned", + "one-byte value", + "0-255 whole", }, + Keywords: []string{ + "unsigned", "8bit", "byte", "octet", "range", "integer", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return uint8Func(f), nil }, }) AddFuncLookup("uint16", Info{ Display: "Uint16", Category: "number", - Description: "Unsigned 16-bit integer, capable of representing values from 0 to 65,535", + Description: "Unsigned 16-bit integer, range 0–65,535", Example: "34968", Output: "uint16", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return uint16Func(f), nil + Aliases: []string{ + "two-byte unsigned", + "ushort quantity", + "medium-range unsigned", + "port-sized value", + "0-65535 whole", }, + Keywords: []string{ + "unsigned", "16bit", "word", "port", "range", "integer", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return uint16Func(f), nil }, }) AddFuncLookup("uint32", Info{ Display: "Uint32", Category: "number", - Description: "Unsigned 32-bit integer, capable of representing values from 0 to 4,294,967,295", + Description: "Unsigned 32-bit integer, range 0–4,294,967,295", Example: "1075055705", Output: "uint32", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return uint32Func(f), nil + Aliases: []string{ + "four-byte unsigned", "u32 numeric", "ipv4-scale value", + "wide-range unsigned", "32-bit whole", "medium unsigned int", "standard unsigned int", }, + Keywords: []string{ + "unsigned", "32bit", "range", "ipv4", "integer", "binary", "numeric", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return uint32Func(f), nil }, }) AddFuncLookup("uint64", Info{ Display: "Uint64", Category: "number", - Description: "Unsigned 64-bit integer, capable of representing values from 0 to 18,446,744,073,709,551,615", + Description: "Unsigned 64-bit integer, range 
0–18,446,744,073,709,551,615", Example: "843730692693298265", Output: "uint64", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return f.Uint64(), nil + Aliases: []string{ + "eight-byte unsigned", "u64 numeric", "very-large unsigned", "wide whole count", "extended-range value", "large uint", "unsigned bigint", }, + Keywords: []string{ + "unsigned", "64bit", "range", "bigint", "integer", "numeric", "arithmetic", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return f.Uint64(), nil }, }) AddFuncLookup("uintrange", Info{ - Display: "UintRange", + Display: "Uint Range", Category: "number", - Description: "Non-negative integer value between given range", + Description: "Unsigned integer value within a given range", Example: "1075055705", Output: "uint", + Aliases: []string{ + "unsigned span", + "nonnegative interval", + "ranged cardinal", + "bounded unsigned result", + "constrained uint output", + }, + Keywords: []string{ + "uintrange", "unsigned", "range", "min", "max", + "bounds", "limits", "interval", "span", + }, Params: []Param{ {Field: "min", Display: "Min", Type: "uint", Default: "0", Description: "Minimum uint value"}, {Field: "max", Display: "Max", Type: "uint", Default: "4294967295", Description: "Maximum uint value"}, @@ -442,12 +506,10 @@ func addNumberLookup() { if err != nil { return nil, err } - max, err := info.GetUint(m, "max") if err != nil { return nil, err } - return uintRangeFunc(f, min, max), nil }, }) @@ -458,26 +520,45 @@ func addNumberLookup() { Description: "Signed integer", Example: "14866", Output: "int", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return intFunc(f), nil + Aliases: []string{ + "signed whole", + "two-sided count", + "negative-or-positive value", + "zero-inclusive whole", + "general int type", }, + Keywords: []string{ + "signed", "integer", + "positive", "negative", "zero", "counting", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) 
{ return intFunc(f), nil }, }) AddFuncLookup("intn", Info{ Display: "IntN", Category: "number", - Description: "Integer value between 0 and n", + Description: "Integer between 0 (inclusive) and n (exclusive)", Example: "32783", Output: "int", + Aliases: []string{ + "upper-bounded int", + "exclusive-maximum int", + "zero-through-n minus one", + "limited-range int", + "cap-limited integer", + }, + Keywords: []string{ + "range", "upper", "limit", "bound", + "cap", "max", "exclusive", "integer", + }, Params: []Param{ - {Field: "n", Display: "N", Type: "int", Default: "2147483647", Description: "Maximum int value"}, + {Field: "n", Display: "N", Type: "int", Default: "2147483647", Description: "Maximum int value (exclusive)"}, }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { n, err := info.GetInt(m, "n") if err != nil { return nil, err } - return intNFunc(f, n), nil }, }) @@ -485,53 +566,84 @@ func addNumberLookup() { AddFuncLookup("int8", Info{ Display: "Int8", Category: "number", - Description: "Signed 8-bit integer, capable of representing values from -128 to 127", + Description: "Signed 8-bit integer, range −128–127", Example: "24", Output: "int8", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return int8Func(f), nil + Aliases: []string{ + "byte-sized signed", "small signed range", "one-byte integer", "8-bit whole signed", "narrow signed value", "tiny int", "signed byte", }, + Keywords: []string{ + "signed", "8bit", "range", "twoscomplement", "integer", "arithmetic", "numeric", "binary", "storage", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return int8Func(f), nil }, }) AddFuncLookup("int16", Info{ Display: "Int16", Category: "number", - Description: "Signed 16-bit integer, capable of representing values from 32,768 to 32,767", + Description: "Signed 16-bit integer, range −32,768–32,767", Example: "2200", Output: "int16", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - 
return int16Func(f), nil + Aliases: []string{ + "two-byte signed", "short integer signed", "16-bit whole signed", "narrow-mid signed", "twobyte int", "short int", "halfword signed", }, + Keywords: []string{ + "signed", "16bit", "range", "word", "numeric", "arithmetic", "binary", "integer", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return int16Func(f), nil }, }) AddFuncLookup("int32", Info{ Display: "Int32", Category: "number", - Description: "Signed 32-bit integer, capable of representing values from -2,147,483,648 to 2,147,483,647", + Description: "Signed 32-bit integer, range −2,147,483,648–2,147,483,647", Example: "-1072427943", Output: "int32", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return int32Func(f), nil + Aliases: []string{ + "four-byte signed", "standard-width signed", "32-bit whole signed", "midrange integer", "int32 value", "long int", "standard signed int", }, + Keywords: []string{ + "signed", "32bit", "range", "ipv4", "numeric", "arithmetic", "binary", "integer", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return int32Func(f), nil }, }) AddFuncLookup("int64", Info{ Display: "Int64", Category: "number", - Description: "Signed 64-bit integer, capable of representing values from -9,223,372,036,854,775,808 to -9,223,372,036,854,775,807", + Description: "Signed 64-bit integer, range −9,223,372,036,854,775,808–9,223,372,036,854,775,807", Example: "-8379641344161477543", Output: "int64", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return int64Func(f), nil + Aliases: []string{ + "eight-byte signed", + "long-width integer", + "64-bit whole signed", + "large signed value", + "extended signed range", }, + Keywords: []string{ + "signed", "64bit", "bigint", "range", "timestamp", "nanosecond", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return int64Func(f), nil }, }) AddFuncLookup("intrange", Info{ - Display: "IntRange", + 
Display: "Int Range", Category: "number", - Description: "Integer value between given range", + Description: "Signed integer value within a given range", Example: "-8379477543", Output: "int", + Aliases: []string{ + "signed span", + "bounded integer result", + "constrained int output", + "limited signed interval", + "ranged whole value", + }, + Keywords: []string{ + "int", "range", "min", "max", + "bounds", "limits", "interval", "span", + }, Params: []Param{ {Field: "min", Display: "Min", Type: "int", Description: "Minimum int value"}, {Field: "max", Display: "Max", Type: "int", Description: "Maximum int value"}, @@ -541,12 +653,10 @@ func addNumberLookup() { if err != nil { return nil, err } - max, err := info.GetInt(m, "max") if err != nil { return nil, err } - return intRangeFunc(f, min, max), nil }, }) @@ -554,20 +664,40 @@ func addNumberLookup() { AddFuncLookup("float32", Info{ Display: "Float32", Category: "number", - Description: "Data type representing floating-point numbers with 32 bits of precision in computing", + Description: "Floating-point number with 32-bit single precision (IEEE 754)", Example: "3.1128167e+37", Output: "float32", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return float32Func(f), nil + Aliases: []string{ + "single-precision float", + "fp32 numeric", + "32-bit real", + "float single", + "reduced-precision real", }, + Keywords: []string{ + "single-precision", "ieee754", + "fp32", "mantissa", "exponent", "decimal", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return float32Func(f), nil }, }) AddFuncLookup("float32range", Info{ Display: "Float32 Range", Category: "number", - Description: "Float32 value between given range", + Description: "Float32 value within a given range", Example: "914774.6", Output: "float32", + Aliases: []string{ + "single-precision span", + "bounded fp32", + "limited float32 output", + "constrained 32-bit real", + "float single interval", + }, + Keywords: 
[]string{ + "float32", "range", + "min", "max", "bounds", "limits", "interval", + }, Params: []Param{ {Field: "min", Display: "Min", Type: "float", Description: "Minimum float32 value"}, {Field: "max", Display: "Max", Type: "float", Description: "Maximum float32 value"}, @@ -577,12 +707,10 @@ func addNumberLookup() { if err != nil { return nil, err } - max, err := info.GetFloat32(m, "max") if err != nil { return nil, err } - return float32Range(f, min, max), nil }, }) @@ -590,20 +718,41 @@ func addNumberLookup() { AddFuncLookup("float64", Info{ Display: "Float64", Category: "number", - Description: "Data type representing floating-point numbers with 64 bits of precision in computing", + Description: "Floating-point number with 64-bit double precision (IEEE 754)", Example: "1.644484108270445e+307", Output: "float64", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return float64Func(f), nil + Aliases: []string{ + "double-precision float", + "fp64 numeric", + "64-bit real", + "float double", + "high-precision real", }, + Keywords: []string{ + "double-precision", "ieee754", + "fp64", "mantissa", "exponent", "decimal", + "precision", "scientific", "number", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return float64Func(f), nil }, }) AddFuncLookup("float64range", Info{ Display: "Float64 Range", Category: "number", - Description: "Float64 value between given range", + Description: "Float64 value within a given range", Example: "914774.5585333086", Output: "float64", + Aliases: []string{ + "double-precision span", + "bounded fp64", + "limited float64 output", + "constrained 64-bit real", + "float double interval", + }, + Keywords: []string{ + "float64", "range", + "min", "max", "bounds", "limits", "interval", + }, Params: []Param{ {Field: "min", Display: "Min", Type: "float", Description: "Minimum float64 value"}, {Field: "max", Display: "Max", Type: "float", Description: "Maximum float64 value"}, @@ -613,12 +762,10 @@ 
func addNumberLookup() { if err != nil { return nil, err } - max, err := info.GetFloat64(m, "max") if err != nil { return nil, err } - return float64Range(f, min, max), nil }, }) @@ -629,6 +776,17 @@ func addNumberLookup() { Description: "Shuffles an array of ints", Example: "1,2,3,4 => 3,1,4,2", Output: "[]int", + Aliases: []string{ + "reorder integers", + "scramble int slice", + "random permutation ints", + "reshuffle numbers", + "jumbled int output", + }, + Keywords: []string{ + "shuffle", "permute", "randomize", + "ints", "slice", "array", "permutation", + }, Params: []Param{ {Field: "ints", Display: "Integers", Type: "[]int", Description: "Delimited separated integers"}, }, @@ -637,9 +795,7 @@ func addNumberLookup() { if err != nil { return nil, err } - shuffleInts(f, ints) - return ints, nil }, }) @@ -650,6 +806,17 @@ func addNumberLookup() { Description: "Randomly selected value from a slice of int", Example: "-1,2,-3,4 => -3", Output: "int", + Aliases: []string{ + "draw one integer", + "sample an int", + "pick from ints", + "select a number", + "choose single int", + }, + Keywords: []string{ + "random", "pick", "choose", + "select", "ints", "slice", "list", + }, Params: []Param{ {Field: "ints", Display: "Integers", Type: "[]int", Description: "Delimited separated integers"}, }, @@ -658,7 +825,6 @@ func addNumberLookup() { if err != nil { return nil, err } - return randomInt(f, ints), nil }, }) @@ -669,6 +835,17 @@ func addNumberLookup() { Description: "Randomly selected value from a slice of uint", Example: "1,2,3,4 => 4", Output: "uint", + Aliases: []string{ + "draw one unsigned", + "sample a uint", + "pick from uints", + "select an unsigned", + "choose single uint", + }, + Keywords: []string{ + "random", "pick", "choose", + "select", "uints", "slice", "list", "nonnegative", + }, Params: []Param{ {Field: "uints", Display: "Unsigned Integers", Type: "[]uint", Description: "Delimited separated unsigned integers"}, }, @@ -677,7 +854,6 @@ func 
addNumberLookup() { if err != nil { return nil, err } - return randomUint(f, uints), nil }, }) @@ -688,6 +864,17 @@ func addNumberLookup() { Description: "Hexadecimal representation of an unsigned integer", Example: "0x87", Output: "string", + Aliases: []string{ + "hex-encoded unsigned", + "base-16 uint string", + "prefixed 0x value", + "hex view of uint", + "formatted unsigned hex", + }, + Keywords: []string{ + "hex", "base16", "uint", "0x", + "bits", "width", "format", + }, Params: []Param{ {Field: "bitSize", Display: "Bit Size", Type: "int", Default: "8", Description: "Bit size of the unsigned integer"}, }, @@ -696,8 +883,8 @@ func addNumberLookup() { if err != nil { return nil, err } - return hexUint(f, bitSize), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/payment.go b/vendor/github.com/brianvoe/gofakeit/v7/payment.go index 52496153..8aac56df 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/payment.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/payment.go @@ -108,7 +108,7 @@ func creditCardNumber(f *Faker, cco *CreditCardOptions) string { if cco == nil { cco = &CreditCardOptions{} } - if cco.Types == nil || len(cco.Types) == 0 { + if len(cco.Types) == 0 { cco.Types = data.CreditCardTypes } ccType := randomString(f, cco.Types) @@ -240,50 +240,80 @@ func bitcoinPrivateKey(f *Faker) string { return "5" + randomString(f, []string{"H", "J", "K"}) + b.String() } +func BankName() string { return bankName(GlobalFaker) } + +func (f *Faker) BankName() string { return bankName(f) } + +func bankName(f *Faker) string { return getRandValue(f, []string{"bank", "name"}) } + +func BankType() string { return bankType(GlobalFaker) } + +func (f *Faker) BankType() string { return bankType(f) } + +func bankType(f *Faker) string { return getRandValue(f, []string{"bank", "type"}) } + func addPaymentLookup() { AddFuncLookup("currency", Info{ Display: "Currency", Category: "payment", - Description: "Medium of exchange, often in the form of paper money or coins, 
used for trade and transactions", + Description: "Medium of exchange, often in the form of money, used for trade and transactions", Example: `{ "short": "IQD", "long": "Iraq Dinar" }`, Output: "map[string]string", ContentType: "application/json", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return currency(f), nil + Aliases: []string{ + "currency unit", "currency code", "money type", "exchange currency", "monetary unit", "legal tender", "fiat money", }, + Keywords: []string{ + "money", "exchange", "fiat", "unit", "code", "iso", "usd", "eur", "gbp", "jpy", "cny", "trade", "transaction", "market", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return currency(f), nil }, }) AddFuncLookup("currencyshort", Info{ Display: "Currency Short", Category: "payment", - Description: "Short 3-letter word used to represent a specific currency", + Description: "Short 3-letter ISO code used to represent a specific currency", Example: "USD", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return currencyShort(f), nil + Aliases: []string{ + "iso alpha-3", "currency ticker", "alpha-3 code", "currency shorthand", "iso-4217 code", }, + Keywords: []string{ + "currency", "short", "iso", "code", "alpha3", "usd", "eur", "gbp", "jpy", "cad", "aud", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return currencyShort(f), nil }, }) AddFuncLookup("currencylong", Info{ Display: "Currency Long", Category: "payment", - Description: "Complete name of a specific currency used for official identification in financial transactions", + Description: "Complete name of a specific currency used in financial transactions", Example: "United States Dollar", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return currencyLong(f), nil + Aliases: []string{ + "currency name", "full currency", "long form name", "official currency", "monetary name", }, + Keywords: 
[]string{ + "currency", "name", "long", "full", "official", "dollar", "euro", "pound", "yen", "franc", "peso", "rupee", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return currencyLong(f), nil }, }) AddFuncLookup("price", Info{ Display: "Price", Category: "payment", - Description: "The amount of money or value assigned to a product, service, or asset in a transaction", + Description: "The amount of money assigned to a product, service, or asset in a transaction", Example: "92.26", Output: "float64", + Aliases: []string{ + "amount", "cost", "value", "fee", "charge", "rate", "unit price", + }, + Keywords: []string{ + "payment", "transaction", "retail", "wholesale", "market", "asset", "listing", "quote", "valuation", + }, Params: []Param{ {Field: "min", Display: "Min", Type: "float", Default: "0", Description: "Minimum price value"}, {Field: "max", Display: "Max", Type: "float", Default: "1000", Description: "Maximum price value"}, @@ -293,12 +323,10 @@ func addPaymentLookup() { if err != nil { return nil, err } - max, err := info.GetFloat64(m, "max") if err != nil { return nil, err } - return price(f, min, max), nil }, }) @@ -306,18 +334,22 @@ func addPaymentLookup() { AddFuncLookup("creditcard", Info{ Display: "Credit Card", Category: "payment", - Description: "Plastic card allowing users to make purchases on credit, with payment due at a later date", + Description: "Card allowing users to make purchases on credit, with payment due at a later date", Example: `{ - "type": "UnionPay", - "number": "4364599489953698", - "exp": "02/24", - "cvv": "300" + "type": "Visa", + "number": "4111111111111111", + "exp": "02/27", + "cvv": "123" }`, Output: "map[string]any", ContentType: "application/json", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return creditCard(f), nil + Aliases: []string{ + "credit card", "payment card", "charge card", "credit instrument", "card account", "plastic card", }, + Keywords: []string{ + 
"credit", "card", "payment", "debt", "visa", "mastercard", "amex", "discover", "unionpay", "maestro", "jcb", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return creditCard(f), nil }, }) AddFuncLookup("creditcardtype", Info{ @@ -326,17 +358,27 @@ func addPaymentLookup() { Description: "Classification of credit cards based on the issuing company", Example: "Visa", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return creditCardType(f), nil + Aliases: []string{ + "credit card type", "issuer brand", "card network", "scheme name", "card family", "issuer type", }, + Keywords: []string{ + "credit", "card", "type", "issuer", "brand", "network", "visa", "mastercard", "amex", "discover", "unionpay", "maestro", "jcb", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return creditCardType(f), nil }, }) AddFuncLookup("creditcardnumber", Info{ Display: "Credit Card Number", Category: "payment", - Description: "Unique numerical identifier on a credit card used for making electronic payments and transactions", - Example: "4136459948995369", + Description: "Unique number on a credit card used for electronic payments", + Example: "4111111111111111", Output: "string", + Aliases: []string{ + "credit card", "credit card number", "card number", "cc number", "primary account number", "pan value", "payment number", + }, + Keywords: []string{ + "credit", "card", "number", "identifier", "luhn", "validation", "checksum", "bin", "tokenize", "masking", "digits", + }, Params: []Param{ { Field: "types", Display: "Types", Type: "[]string", Default: "all", @@ -344,7 +386,7 @@ func addPaymentLookup() { Description: "A select number of types you want to use when generating a credit card number", }, {Field: "bins", Display: "Bins", Type: "[]string", Optional: true, Description: "Optional list of prepended bin numbers to pick from"}, - {Field: "gaps", Display: "Gaps", Type: "bool", Default: "false", 
Description: "Whether or not to have gaps in number"}, + {Field: "gaps", Display: "Gaps", Type: "bool", Default: "false", Optional: true, Description: "Whether or not to have gaps in number"}, }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { types, err := info.GetStringArray(m, "types") @@ -354,23 +396,12 @@ func addPaymentLookup() { if len(types) == 1 && types[0] == "all" { types = []string{} } - bins, _ := info.GetStringArray(m, "bins") - - gaps, err := info.GetBool(m, "gaps") - if err != nil { - return nil, err - } - - options := CreditCardOptions{ - Types: types, - Gaps: gaps, - } - + gaps, _ := info.GetBool(m, "gaps") + options := CreditCardOptions{Types: types, Gaps: gaps} if len(bins) >= 1 { options.Bins = bins } - return creditCardNumber(f, &options), nil }, }) @@ -378,66 +409,121 @@ func addPaymentLookup() { AddFuncLookup("creditcardexp", Info{ Display: "Credit Card Exp", Category: "payment", - Description: "Date when a credit card becomes invalid and cannot be used for transactions", - Example: "01/21", + Description: "Expiration date of a credit card", + Example: "01/27", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return creditCardExp(f), nil + Aliases: []string{ + "credit card exp", "credit card expiration", "expiry date", "expiration date", "exp date", "valid thru", "card expiry", }, + Keywords: []string{ + "credit", "card", "exp", "expiry", "expiration", "month", "year", "validity", "future", "expired", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return creditCardExp(f), nil }, }) AddFuncLookup("creditcardcvv", Info{ Display: "Credit Card CVV", Category: "payment", - Description: "Three or four-digit security code on a credit card used for online and remote transactions", + Description: "Three or four-digit security code on a credit card", Example: "513", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return 
creditCardCvv(f), nil + Aliases: []string{ + "credit card cvv", "cvv", "cvc", "cid", "security number", "auth digits", "card check value", "security code", }, + Keywords: []string{ + "security", "code", "verification", "authentication", "fraud", "protection", "online", "payment", "transaction", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return creditCardCvv(f), nil }, }) AddFuncLookup("achrouting", Info{ Display: "ACH Routing Number", Category: "payment", - Description: "Unique nine-digit code used in the U.S. for identifying the bank and processing electronic transactions", + Description: "Nine-digit code used in the U.S. for identifying a bank in ACH transactions", Example: "513715684", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return achRouting(f), nil + Aliases: []string{ + "routing number", "aba number", "routing transit number", "rtn code", "bank routing id", }, + Keywords: []string{ + "ach", "routing", "aba", "us", "bank", "federal", "reserve", "clearinghouse", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return achRouting(f), nil }, }) AddFuncLookup("achaccount", Info{ Display: "ACH Account Number", Category: "payment", - Description: "A bank account number used for Automated Clearing House transactions and electronic transfers", + Description: "Bank account number used for Automated Clearing House transactions", Example: "491527954328", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return achAccount(f), nil + Aliases: []string{ + "account number", "ach account", "bank account", "checking account", "savings account", "account identifier", }, + Keywords: []string{ + "ach", "account", "banking", "checking", "savings", "finance", "electronic", "transfer", "payment", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return achAccount(f), nil }, }) AddFuncLookup("bitcoinaddress", Info{ Display: 
"Bitcoin Address", Category: "payment", - Description: "Cryptographic identifier used to receive, store, and send Bitcoin cryptocurrency in a peer-to-peer network", - Example: "1lWLbxojXq6BqWX7X60VkcDIvYA", + Description: "Cryptographic identifier used to send and receive Bitcoin", + Example: "1BoatSLRHtKNngkdXEeobR76b53LETtpyT", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return bitcoinAddress(f), nil + Aliases: []string{ + "btc address", "bitcoin wallet", "crypto address", "public address", "payment address", }, + Keywords: []string{ + "bitcoin", "btc", "wallet", "blockchain", "public", "key", "hash", "base58", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bitcoinAddress(f), nil }, }) AddFuncLookup("bitcoinprivatekey", Info{ Display: "Bitcoin Private Key", Category: "payment", - Description: "Secret, secure code that allows the owner to access and control their Bitcoin holdings", - Example: "5vrbXTADWJ6sQBSYd6lLkG97jljNc0X9VPBvbVqsIH9lWOLcoqg", + Description: "Secret key that allows access and control over Bitcoin holdings", + Example: "5HueCGU8rMjxEXxiPuD5BDuG6o5xjA7QkbPp", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return bitcoinPrivateKey(f), nil + Aliases: []string{ + "btc private key", "wallet key", "secret key", "private wif", "signing key", }, + Keywords: []string{ + "bitcoin", "btc", "private", "key", "blockchain", "wallet", "signature", "base58", "wif", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bitcoinPrivateKey(f), nil }, }) + + AddFuncLookup("bankname", Info{ + Display: "Bank Name", + Category: "payment", + Description: "Name of a financial institution that offers banking services", + Example: "Wells Fargo", + Output: "string", + Aliases: []string{ + "financial institution", "banking entity", "lender name", "depository name", "institution title", + }, + Keywords: []string{ + "bank", "name", 
"institution", "financial", "wells", "fargo", "chase", "citibank", "pnc", "boa", "usbank", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bankName(f), nil }, + }) + + AddFuncLookup("banktype", Info{ + Display: "Bank Type", + Category: "payment", + Description: "Classification of a bank based on its services and operations", + Example: "Investment Bank", + Output: "string", + Aliases: []string{ + "bank classification", "bank category", "bank segment", "institution class", "service tier", + }, + Keywords: []string{ + "bank", "type", "classification", "category", "segment", "investment", "commercial", "retail", "savings", "credit", "union", "central", "federal", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return bankType(f), nil }, + }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/person.go b/vendor/github.com/brianvoe/gofakeit/v7/person.go index 3eb804c3..b69a5321 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/person.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/person.go @@ -11,6 +11,7 @@ type PersonInfo struct { FirstName string `json:"first_name" xml:"first_name"` LastName string `json:"last_name" xml:"last_name"` Gender string `json:"gender" xml:"gender"` + Age int `json:"age" xml:"age"` SSN string `json:"ssn" xml:"ssn"` Hobby string `json:"hobby" xml:"hobby"` Job *JobInfo `json:"job" xml:"job"` @@ -30,6 +31,7 @@ func person(f *Faker) *PersonInfo { FirstName: firstName(f), LastName: lastName(f), Gender: gender(f), + Age: age(f), SSN: ssn(f), Hobby: hobby(f), Job: job(f), @@ -89,6 +91,22 @@ func (f *Faker) NameSuffix() string { return nameSuffix(f) } func nameSuffix(f *Faker) string { return getRandValue(f, []string{"person", "suffix"}) } +// Age will generate a random age between 0 and 100 +func Age() int { return age(GlobalFaker) } + +// Age will generate a random age between 0 and 100 +func (f *Faker) Age() int { return age(f) } + +func age(f *Faker) int { return randIntRange(f, 0, 
100) } + +// Ethnicity will generate a random ethnicity string +func Ethnicity() string { return ethnicity(GlobalFaker) } + +// Ethnicity will generate a random ethnicity string +func (f *Faker) Ethnicity() string { return ethnicity(f) } + +func ethnicity(f *Faker) string { return getRandValue(f, []string{"person", "ethnicity"}) } + // SSN will generate a random Social Security Number func SSN() string { return ssn(GlobalFaker) } @@ -97,6 +115,27 @@ func (f *Faker) SSN() string { return ssn(f) } func ssn(f *Faker) string { return strconv.Itoa(randIntRange(f, 100000000, 999999999)) } +// EIN will generate a random Employer Identification Number +func EIN() string { return ein(GlobalFaker) } + +// EIN will generate a random Employer Identification Number +func (f *Faker) EIN() string { return ein(f) } + +func ein(f *Faker) string { + // EIN format: XX-XXXXXXX (2 digits, dash, 7 digits) + // First two digits have specific valid prefixes + prefixes := []string{"10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99"} + prefix := prefixes[f.IntN(len(prefixes))] + + // Generate 7 random digits + sevenDigits := "" + for i := 0; i < 7; i++ { + sevenDigits += string(rune('0' + f.IntN(10))) + } + + return prefix + "-" + sevenDigits +} + // Gender will generate a random gender string func Gender() string { return gender(GlobalFaker) } @@ -119,6 +158,44 @@ func (f *Faker) Hobby() string { return hobby(f) } func hobby(f *Faker) string { return getRandValue(f, []string{"person", "hobby"}) } +// SocialMedia 
will generate a random social media string +func SocialMedia() string { return socialMedia(GlobalFaker) } + +// SocialMedia will generate a random social media string +func (f *Faker) SocialMedia() string { return socialMedia(f) } + +func socialMedia(f *Faker) string { + template := getRandValue(f, []string{"person", "social_media"}) + social, err := generate(f, template) + if err != nil { + return template // fallback to raw template if generation fails + } + + return social +} + +// Bio will generate a random biography using mad libs style templates +func Bio() string { + return bio(GlobalFaker) +} + +// Bio will generate a random biography using mad libs style templates +func (f *Faker) Bio() string { + return bio(f) +} + +func bio(f *Faker) string { + template := getRandValue(f, []string{"person", "bio"}) + + // Use generate function to process the template with all available lookups + bio, err := generate(f, template) + if err != nil { + return template // fallback to raw template if generation fails + } + + return bio +} + // ContactInfo struct full of contact info type ContactInfo struct { Phone string `json:"phone" xml:"phone"` @@ -211,6 +288,7 @@ func addPersonLookup() { "first_name": "Markus", "last_name": "Moen", "gender": "male", + "age": 30, "ssn": "275413589", "image": "https://picsum.photos/208/500", "hobby": "Lacrosse", @@ -243,141 +321,411 @@ func addPersonLookup() { }`, Output: "map[string]any", ContentType: "application/json", + Aliases: []string{ + "person record", + "identity profile", + "user profile", + "personal info", + "individual data", + }, + Keywords: []string{ + "profile", "identity", "individual", + "user", "account", "record", "contact", + "name", "details", "attributes", "information", + "bio", "demographics", "personal", "data", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return person(f), nil }, }) + // full name AddFuncLookup("name", Info{ Display: "Name", Category: "person", Description: "The given and 
family name of an individual", Example: "Markus Moen", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return name(f), nil + Aliases: []string{ + "full name", + "person name", + "complete name", + "name string", + "display name", }, + Keywords: []string{ + "fullname", "given", "family", + "first", "last", "forename", "surname", + "display", "legal", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return name(f), nil }, }) + // name prefix (honorific) AddFuncLookup("nameprefix", Info{ Display: "Name Prefix", Category: "person", Description: "A title or honorific added before a person's name", Example: "Mr.", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return namePrefix(f), nil + Aliases: []string{ + "name prefix", + "honorific", + "title prefix", + "courtesy title", + "pre-nominal", }, + Keywords: []string{ + "prefix", "title", "mr", "ms", "mrs", + "dr", "prof", "sir", "madam", "rev", "fr", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return namePrefix(f), nil }, }) + // name suffix (generational/professional) AddFuncLookup("namesuffix", Info{ Display: "Name Suffix", Category: "person", Description: "A title or designation added after a person's name", Example: "Jr.", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return nameSuffix(f), nil + Aliases: []string{ + "name suffix", + "post nominal", + "suffix designation", + "generational suffix", + "professional suffix", }, + Keywords: []string{ + "suffix", "jr", "sr", "iii", "iv", + "esq", "phd", "md", "mba", "cpa", + "designation", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nameSuffix(f), nil }, }) + // first name AddFuncLookup("firstname", Info{ Display: "First Name", Category: "person", Description: "The name given to a person at birth", Example: "Markus", Output: "string", - Generate: func(f *Faker, m 
*MapParams, info *Info) (any, error) { - return firstName(f), nil + Aliases: []string{ + "first name", + "given name", + "forename", + "personal name", + "given label", }, + Keywords: []string{ + "first", "given", "name", + "preferred", "callname", "initial", + "personal", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return firstName(f), nil }, }) + // middle name AddFuncLookup("middlename", Info{ Display: "Middle Name", Category: "person", Description: "Name between a person's first name and last name", Example: "Belinda", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return middleName(f), nil + Aliases: []string{ + "middle name", + "second name", + "additional name", + "secondary name", + "middle initial label", }, + Keywords: []string{ + "middle", "second", "additional", "secondary", + "name", "initial", "intermediate", "optional", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return middleName(f), nil }, }) + // last name AddFuncLookup("lastname", Info{ Display: "Last Name", Category: "person", Description: "The family name or surname of an individual", Example: "Daniel", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return lastName(f), nil + Aliases: []string{ + "last name", + "family name", + "surname", + "patronymic", + "family designation", }, + Keywords: []string{ + "last", "family", "name", + "lineage", "heritage", "ancestry", "clan", + "tribe", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return lastName(f), nil }, }) + // gender AddFuncLookup("gender", Info{ Display: "Gender", Category: "person", - Description: "Classification based on social and cultural norms that identifies an individual", + Description: "Classification that identifies gender", Example: "male", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return gender(f), nil + Aliases: 
[]string{ + "gender identity", + "gender label", + "sex category", + "gender marker", + "presentation", }, + Keywords: []string{ + "male", "female", "nonbinary", + "identity", "label", "category", "sex", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return gender(f), nil }, }) + // age + AddFuncLookup("age", Info{ + Display: "Age", + Category: "person", + Description: "The number of years a person has lived", + Example: "40", + Output: "int", + Aliases: []string{ + "age of person", "person age", "age of individual", "years old", "years of age", + }, + Keywords: []string{ + "years", "old", "birthday", "birthdate", "birth-date", + "lifespan", "maturity", "elderly", "young", "adult", "teenager", "child", + "senior", "juvenile", "minor", "majority", "minority", "generation", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return age(f), nil }, + }) + + // ethnicity + AddFuncLookup("ethnicity", Info{ + Display: "Ethnicity", + Category: "person", + Description: "Classification that identifies a person's cultural or ethnic background", + Example: "German", + Output: "string", + Aliases: []string{ + "ethnic background", + "ethnic identity", + "cultural background", + "cultural heritage", + "ethnic origin", + }, + Keywords: []string{ + "ethnic", "heritage", "ancestry", + "origin", "identity", "cultural", "nationality", + "background", "descent", "lineage", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return ethnicity(f), nil }, + }) + + // ssn AddFuncLookup("ssn", Info{ Display: "SSN", Category: "person", Description: "Unique nine-digit identifier used for government and financial purposes in the United States", Example: "296446360", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return ssn(f), nil + Aliases: []string{ + "social security number", + "ssn number", + "us ssn", + "tax id us", + "federal id", }, + Keywords: []string{ + "social", "security", 
"number", + "us", "tax", "irs", "employment", + "benefits", "identification", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return ssn(f), nil }, }) + AddFuncLookup("ein", Info{ + Display: "EIN", + Category: "person", + Description: "Nine-digit Employer Identification Number used by businesses for tax purposes", + Example: "12-3456789", + Output: "string", + Aliases: []string{ + "employer id", + "tax id", + "business tax id", + "federal tax id", + "irs number", + }, + Keywords: []string{ + "employer", "identification", "tax", "business", "federal", "irs", "number", "id", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return ein(f), nil }, + }) + + // hobby AddFuncLookup("hobby", Info{ Display: "Hobby", Category: "person", Description: "An activity pursued for leisure and pleasure", Example: "Swimming", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return hobby(f), nil + Aliases: []string{ + "pastime", + "leisure activity", + "recreational activity", + "interest", + "free-time pursuit", }, + Keywords: []string{ + "leisure", "recreation", + "activity", "sport", "craft", + "game", "collection", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return hobby(f), nil }, }) + AddFuncLookup("socialmedia", Info{ + Display: "Social Media", + Category: "person", + Description: "Random social media string", + Example: "https://twitter.com/ImpossibleTrousers", + Output: "string", + Aliases: []string{ + "social media", + "social link", + "social url", + "social handle", + "social username", + "social profile", + "profile link", + "profile url", + "profile handle", + "account link", + "account url", + "account handle", + "username handle", + "screen name", + // platform-intent phrases (useful for fuzzy scoring) + "twitter link", + "x link", + "instagram link", + "linkedin url", + "github url", + "tiktok handle", + "facebook profile", + }, + Keywords: []string{ + 
"social", "media", "profile", "account", "handle", "username", + "screenname", "link", "url", + "twitter", "x", "instagram", "linkedin", "github", + "tiktok", "facebook", "dribbble", "behance", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return socialMedia(f), nil }, + }) + + AddFuncLookup("bio", Info{ + Display: "Biography", + Category: "person", + Description: "Random biography", + Example: "Born in New York, John grew up to become a Software Engineer who codes applications.", + Output: "string", + Aliases: []string{ + "bio", + "short bio", + "mini bio", + "one line bio", + "profile bio", + "user bio", + "author bio", + "about", + "about me", + "profile summary", + "personal summary", + "blurb", + "elevator pitch", + }, + Keywords: []string{ + "profile", "summary", "tagline", "intro", + "overview", "description", "story", "background", + "career", "job", "role", "hobby", "personal", "person", + "one-liner", "author", "user", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return f.Bio(), nil }, + }) + + // email AddFuncLookup("email", Info{ Display: "Email", Category: "person", - Description: "Electronic mail used for sending digital messages and communication over the internet", + Description: "Electronic mail address", Example: "markusmoen@pagac.net", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return email(f), nil + Aliases: []string{ + "email address", + "mail address", + "contact email", + "user email", + "electronic mailbox", }, + Keywords: []string{ + "address", "mail", "inbox", + "account", "contact", "sender", "recipient", + "domain", "username", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return email(f), nil }, }) + // phone (raw digits) AddFuncLookup("phone", Info{ Display: "Phone", Category: "person", Description: "Numerical sequence used to contact individuals via telephone or mobile devices", Example: "6136459948", Output: 
"string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phone(f), nil + Aliases: []string{ + "phone number", + "telephone number", + "mobile number", + "contact number", + "voice number", }, + Keywords: []string{ + "number", "telephone", "mobile", + "contact", "dial", "cell", "landline", + "e164", "voice", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return phone(f), nil }, }) + // phone formatted (readable) AddFuncLookup("phoneformatted", Info{ Display: "Phone Formatted", Category: "person", Description: "Formatted phone number of a person", Example: "136-459-9489", Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phoneFormatted(f), nil + Aliases: []string{ + "formatted phone", + "pretty phone", + "display phone", + "readable phone", + "formatted telephone", }, + Keywords: []string{ + "phone", "formatted", "format", "pattern", + "dashes", "parentheses", "spaces", "separators", + "telephone", "contact", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return phoneFormatted(f), nil }, }) AddFuncLookup("teams", Info{ @@ -402,6 +750,17 @@ func addPersonLookup() { }`, Output: "map[string][]string", ContentType: "application/json", + Aliases: []string{ + "people grouping", + "team assignment", + "random partition", + "group allocator", + "roster builder", + }, + Keywords: []string{ + "randomly", "person", "into", + "distribution", "allocation", "roster", "squad", + }, Params: []Param{ {Field: "people", Display: "Strings", Type: "[]string", Description: "Array of people"}, {Field: "teams", Display: "Strings", Type: "[]string", Description: "Array of teams"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/product.go b/vendor/github.com/brianvoe/gofakeit/v7/product.go index 23f8c8b7..b8011a8b 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/product.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/product.go @@ -2,7 +2,10 @@ package 
gofakeit import ( "fmt" + "strconv" "strings" + + "github.com/brianvoe/gofakeit/v7/data" ) type ProductInfo struct { @@ -69,38 +72,62 @@ func (f *Faker) ProductName() string { return productName(f) } func productName(f *Faker) string { name := getRandValue(f, []string{"product", "name"}) - switch number(f, 0, 9) { - case 1: - // Name + Adjective + Feature - return title(fmt.Sprintf("%s %s %s", name, getRandValue(f, []string{"product", "adjective"}), productFeature(f))) - case 2: - // Adjective + Material + Name - return title(fmt.Sprintf("%s %s %s", getRandValue(f, []string{"product", "adjective"}), productMaterial(f), name)) - case 3: - // Color + Name + Suffix - return title(fmt.Sprintf("%s %s %s", safeColor(f), name, getRandValue(f, []string{"product", "suffix"}))) - case 4: - // Feature + Name + Adjective - return title(fmt.Sprintf("%s %s %s", productFeature(f), name, getRandValue(f, []string{"product", "adjective"}))) - case 5: - // Material + Color + Name - return title(fmt.Sprintf("%s %s %s", productMaterial(f), safeColor(f), name)) - case 6: - // Name + Suffix + Material - return title(fmt.Sprintf("%s %s %s", name, getRandValue(f, []string{"product", "suffix"}), productMaterial(f))) - case 7: - // Adjective + Feature + Name - return title(fmt.Sprintf("%s %s %s", getRandValue(f, []string{"product", "adjective"}), productFeature(f), name)) - case 8: - // Color + Material + Name - return title(fmt.Sprintf("%s %s %s", safeColor(f), productMaterial(f), name)) - case 9: - // Suffix + Adjective + Name - return title(fmt.Sprintf("%s %s %s", getRandValue(f, []string{"product", "suffix"}), getRandValue(f, []string{"product", "adjective"}), name)) + adj := func() string { return getRandValue(f, []string{"product", "adjective"}) } + suf := func() string { return getRandValue(f, []string{"product", "suffix"}) } + mat := func() string { return productMaterial(f) } + feat := func() string { return productFeature(f) } + + // Small realism helpers: occasional 
compound/connector without turning into a listing + compoundAdj := func() string { + // 1 in 5: "adj-adj" (e.g., "Ultra-Light", "Quick-Dry") + if number(f, 1, 5) == 1 { + return fmt.Sprintf("%s-%s", adj(), adj()) + } + return adj() } - // case: 0 - Adjective + Name + Suffix - return title(fmt.Sprintf("%s %s %s", getRandValue(f, []string{"product", "adjective"}), name, getRandValue(f, []string{"product", "suffix"}))) + // Keep it "product name"-ish: 2–3 chunks, no specs/colors/for-phrases. + // Weighted: 2-word names (~70%), 3-word names (~30%) + wordCount, _ := weighted(f, []any{"2word", "3word"}, []float32{70, 30}) + + switch wordCount { + case "2word": + // 2-word names (most common in real products) + switch number(f, 0, 4) { + case 0, 1: + // Adjective + Name (most common 2-word pattern) + return title(fmt.Sprintf("%s %s", compoundAdj(), name)) + case 2: + // Name + Suffix + return title(fmt.Sprintf("%s %s", name, suf())) + case 3: + // Material + Name + return title(fmt.Sprintf("%s %s", mat(), name)) + case 4: + // Feature + Name + return title(fmt.Sprintf("%s %s", feat(), name)) + } + + case "3word": + // 3-word names (less common) + switch number(f, 0, 4) { + case 0, 1: + // Adjective + Name + Suffix (most common 3-word pattern) + return title(fmt.Sprintf("%s %s %s", compoundAdj(), name, suf())) + case 2: + // Name + Feature + Suffix + return title(fmt.Sprintf("%s %s %s", name, feat(), suf())) + case 3: + // Adjective + Feature + Name + return title(fmt.Sprintf("%s %s %s", compoundAdj(), feat(), name)) + case 4: + // Name + Material + Suffix + return title(fmt.Sprintf("%s %s %s", name, mat(), suf())) + } + } + + // Fallback to 2-word name + return title(fmt.Sprintf("%s %s", compoundAdj(), name)) } // ProductDescription will generate a random product description @@ -233,6 +260,87 @@ func productSuffix(f *Faker) string { return getRandValue(f, []string{"product", "suffix"}) } +// ProductISBN13 will generate a random ISBN-13 string for the product +func 
ProductISBN(opts *ISBNOptions) string { return productISBN(GlobalFaker, opts) } + +// ProductISBN13 will generate a random ISBN-13 string for the product +func (f *Faker) ProductISBN(opts *ISBNOptions) string { return productISBN(f, opts) } + +type ISBNOptions struct { + Version string // "10" or "13" + Separator string // e.g. "-", "" (default: "-") +} + +func productISBN(f *Faker, opts *ISBNOptions) string { + if opts == nil { + opts = &ISBNOptions{Version: "13", Separator: "-"} + } + + sep := opts.Separator + if sep == "" { + sep = "-" + } + + // string of n random digits + randomDigits := func(f *Faker, n int) string { + digits := make([]byte, n) + for i := 0; i < n; i++ { + digits[i] = byte('0' + number(f, 0, 9)) + } + return string(digits) + } + + switch opts.Version { + case "10": + // ISBN-10 format: group(1)-registrant(4)-publication(3)-check(1) + group := randomDigits(f, 1) + registrant := randomDigits(f, 4) + publication := randomDigits(f, 3) + base := group + registrant + publication + + // checksum + sum := 0 + for i, c := range base { + digit := int(c - '0') + sum += digit * (10 - i) + } + remainder := (11 - (sum % 11)) % 11 + check := "X" + if remainder < 10 { + check = strconv.Itoa(remainder) + } + + return strings.Join([]string{group, registrant, publication, check}, sep) + + case "13": + // ISBN-13 format: prefix(3)-group(1)-registrant(4)-publication(4)-check(1) + prefix := data.ISBN13Prefix + group := randomDigits(f, 1) + registrant := randomDigits(f, 4) + publication := randomDigits(f, 4) + base := prefix + group + registrant + publication + + // checksum + sum := 0 + for i, c := range base { + digit := int(c - '0') + if i%2 == 0 { + sum += digit + } else { + sum += digit * 3 + } + } + remainder := (10 - (sum % 10)) % 10 + check := strconv.Itoa(remainder) + + return strings.Join([]string{prefix, group, registrant, publication, check}, sep) + + default: + // fallback to ISBN-13 if invalid version provided + return productISBN(f, 
&ISBNOptions{Version: "13", Separator: sep}) + } +} + func addProductLookup() { AddFuncLookup("product", Info{ Display: "Product", @@ -262,6 +370,17 @@ func addProductLookup() { }`, Output: "map[string]any", ContentType: "application/json", + Aliases: []string{ + "goods", + "merchandise", + "retail item", + "consumer product", + "commercial item", + }, + Keywords: []string{ + "sale", "use", "trade", "manufactured", + "market", "inventory", "supply", "distribution", "commodity", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return product(f), nil }, @@ -273,6 +392,19 @@ func addProductLookup() { Description: "Distinctive title or label assigned to a product for identification and marketing", Example: "olive copper monitor", Output: "string", + Aliases: []string{ + "product title", + "product label", + "brand name", + "item name", + "product identifier", + }, + Keywords: []string{ + "product", "name", "title", "label", "brand", + "item", "merchandise", "goods", "article", + "identifier", "marketing", "branding", + "catalog", "inventory", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productName(f), nil }, @@ -284,6 +416,19 @@ func addProductLookup() { Description: "Explanation detailing the features and characteristics of a product", Example: "Backwards caused quarterly without week it hungry thing someone him regularly. 
Whomever this revolt hence from his timing as quantity us these yours.", Output: "string", + Aliases: []string{ + "product details", + "product specs", + "item description", + "feature list", + "marketing copy", + }, + Keywords: []string{ + "product", "description", "details", "features", + "specifications", "characteristics", "summary", + "overview", "attributes", "benefits", + "marketing", "content", "copy", "info", "text", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productDescription(f), nil }, @@ -295,6 +440,19 @@ func addProductLookup() { Description: "Classification grouping similar products based on shared characteristics or functions", Example: "clothing", Output: "string", + Aliases: []string{ + "product classification", + "product type", + "item category", + "product group", + "product segment", + }, + Keywords: []string{ + "product", "category", "type", "class", "classification", + "group", "segment", "line", "collection", "range", + "electronics", "furniture", "clothing", "appliances", + "food", "toys", "accessories", "goods", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productCategory(f), nil }, @@ -306,6 +464,19 @@ func addProductLookup() { Description: "Specific characteristic of a product that distinguishes it from others products", Example: "ultra-lightweight", Output: "string", + Aliases: []string{ + "product trait", + "product attribute", + "key feature", + "unique feature", + "special characteristic", + }, + Keywords: []string{ + "feature", "trait", "attribute", "characteristic", + "capability", "functionality", "specification", + "benefit", "advantage", "highlight", + "unique", "differentiator", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productFeature(f), nil }, @@ -317,6 +488,19 @@ func addProductLookup() { Description: "The substance from which a product is made, influencing its appearance, durability, and properties", Example: "brass", 
Output: "string", + Aliases: []string{ + "material type", + "product substance", + "product composition", + "item material", + "build material", + }, + Keywords: []string{ + "material", "substance", "composition", "make", + "fabric", "textile", "cloth", "leather", "wool", + "wood", "metal", "plastic", "glass", "stone", + "durability", "properties", "construction", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productMaterial(f), nil }, @@ -328,6 +512,20 @@ func addProductLookup() { Description: "Standardized barcode used for product identification and tracking in retail and commerce", Example: "012780949980", Output: "string", + Aliases: []string{ + "upc code", + "product barcode", + "product code", + "product sku", + "universal product code", + "retail barcode", + }, + Keywords: []string{ + "upc", "barcode", "product", "code", "identifier", + "sku", "retail", "commerce", "inventory", + "tracking", "scanning", "checkout", "label", + "universal", "standard", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productUPC(f), nil }, @@ -339,6 +537,20 @@ func addProductLookup() { Description: "The group of people for whom the product is designed or intended", Example: "adults", Output: "[]string", + Aliases: []string{ + "target audience", + "target market", + "customer group", + "user base", + "demographic group", + }, + Keywords: []string{ + "audience", "market", "segment", "demographic", + "consumer", "customer", "buyer", "user", + "group", "target", "population", "adults", + "kids", "teens", "families", "professionals", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productAudience(f), nil }, @@ -350,6 +562,19 @@ func addProductLookup() { Description: "The size or dimension of a product", Example: "medium", Output: "string", + Aliases: []string{ + "product size", + "product measurement", + "item dimensions", + "product scale", + "size specification", + }, + Keywords: []string{ 
+ "dimension", "size", "measurement", "proportion", + "scale", "specification", "specs", "length", + "width", "height", "depth", "volume", "weight", + "product", "item", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productDimension(f), nil }, @@ -361,6 +586,19 @@ func addProductLookup() { Description: "The scenario or purpose for which a product is typically used", Example: "home", Output: "string", + Aliases: []string{ + "use case", + "product purpose", + "intended use", + "product application", + "usage scenario", + }, + Keywords: []string{ + "use", "usecase", "purpose", "usage", "application", + "context", "scenario", "situation", "case", + "intention", "goal", "objective", "function", + "home", "office", "outdoor", "industrial", "commercial", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productUseCase(f), nil }, @@ -372,6 +610,17 @@ func addProductLookup() { Description: "The key advantage or value the product provides", Example: "comfort", Output: "string", + Aliases: []string{ + "product advantage", + "product value", + "user benefit", + "customer gain", + "selling point", + }, + Keywords: []string{ + "benefit", "advantage", "value", "improvement", + "enhancement", "feature", "positive", "outcome", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productBenefit(f), nil }, @@ -383,8 +632,39 @@ func addProductLookup() { Description: "A suffix used to differentiate product models or versions", Example: "pro", Output: "string", + Aliases: []string{ + "product suffix", + "model suffix", + "version suffix", + "edition suffix", + "name suffix", + }, + Keywords: []string{ + "suffix", "variant", "edition", "version", "model", + "series", "line", "tier", "release", "upgrade", + "plus", "pro", "max", "lite", "mini", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return productSuffix(f), nil }, }) + + AddFuncLookup("productisbn", Info{ + Display: 
"Product ISBN", + Category: "product", + Description: "ISBN-10 or ISBN-13 identifier for books", + Example: "978-1-4028-9462-6", + Output: "string", + Aliases: []string{ + "isbn code", "isbn number", "book isbn", "isbn13", + "isbn10", "publication code", "book identifier", + }, + Keywords: []string{ + "identifier", "publication", "library", "catalog", + "literature", "reference", "edition", "registration", "publishing", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return productISBN(f, nil), nil + }, + }) } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/school.go b/vendor/github.com/brianvoe/gofakeit/v7/school.go index b100ab80..e153e2dc 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/school.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/school.go @@ -1,25 +1,31 @@ -package gofakeit - -// School will generate a random School type -func School() string { return school(GlobalFaker) } - -func (f *Faker) School() string { return school(f) } - -func school(f *Faker) string { - return getRandValue(f, []string{"school", "name"}) + " " + - getRandValue(f, []string{"school", "isPrivate"}) + " " + - getRandValue(f, []string{"school", "type"}) -} - -func addSchoolLookup() { - AddFuncLookup("school", Info{ - Display: "School", - Category: "school", - Description: "An institution for formal education and learning", - Example: `Harborview State Academy`, - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return school(f), nil - }, - }) -} +package gofakeit + +// School will generate a random School type +func School() string { return school(GlobalFaker) } + +func (f *Faker) School() string { return school(f) } + +func school(f *Faker) string { + return getRandValue(f, []string{"school", "name"}) + " " + + getRandValue(f, []string{"school", "isPrivate"}) + " " + + getRandValue(f, []string{"school", "type"}) +} + +func addSchoolLookup() { + AddFuncLookup("school", Info{ + Display: "School", + Category: 
"school", + Description: "An institution for formal education and learning", + Example: `Harborview State Academy`, + Output: "string", + Aliases: []string{ + "academy", "educational institute", "learning center", "training school", "academic institution", + }, + Keywords: []string{ + "institution", "education", "learning", "teaching", "university", "college", "campus", "classroom", "study", "pupil", "curriculum", "instruction", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return school(f), nil + }, + }) +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/slice.go b/vendor/github.com/brianvoe/gofakeit/v7/slice.go index f9636eec..b13fff03 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/slice.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/slice.go @@ -11,5 +11,9 @@ func Slice(v any) { sliceFunc(GlobalFaker, v) } func (f *Faker) Slice(v any) { sliceFunc(f, v) } func sliceFunc(f *Faker, v any) { - r(f, reflect.TypeOf(v), reflect.ValueOf(v), "", -1) + // Note: We intentionally call r with size -1 instead of using structFunc. + // structFunc starts with size 0, which would result in zero-length top-level + // slices and maps. Passing -1 lets rSlice/rMap auto-size (random length) + // when no fakesize tag is provided. 
+ r(f, reflect.TypeOf(v), reflect.ValueOf(v), "", -1, 0) } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/song.go b/vendor/github.com/brianvoe/gofakeit/v7/song.go new file mode 100644 index 00000000..a89ed77c --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/song.go @@ -0,0 +1,131 @@ +package gofakeit + +func SongName() string { return songName(GlobalFaker) } + +func (f *Faker) SongName() string { return songName(f) } + +func songName(f *Faker) string { return getRandValue(f, []string{"song", "name"}) } + +func SongArtist() string { return songArtist(GlobalFaker) } + +func (f *Faker) SongArtist() string { return songArtist(f) } + +func songArtist(f *Faker) string { return getRandValue(f, []string{"song", "artist"}) } + +func SongGenre() string { return songGenre(GlobalFaker) } + +func (f *Faker) SongGenre() string { return songGenre(f) } + +func songGenre(f *Faker) string { return getRandValue(f, []string{"song", "genre"}) } + +type SongInfo struct { + Name string `json:"name" xml:"name"` + Artist string `json:"artist" xml:"artist"` + Genre string `json:"genre" xml:"genre"` +} + +func Song() *SongInfo { return song(GlobalFaker) } + +func (f *Faker) Song() *SongInfo { return song(f) } + +func song(f *Faker) *SongInfo { + return &SongInfo{ + Name: songName(f), + Artist: songArtist(f), + Genre: songGenre(f), + } +} + +func addSongLookup() { + AddFuncLookup("song", Info{ + Display: "Song", + Category: "song", + Description: "Song with a drum and horn instrumentation", + Example: `{ + "name": "New Rules", + "genre": "Tropical house" +}`, + Output: "map[string]string", + ContentType: "application/json", + Aliases: []string{ + "musical work", + "sound piece", + "single release", + "record title", + "audio selection", + }, + Keywords: []string{ + "music", "track", "tune", "melody", + "artist", "genre", "name", "composition", "recording", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return song(f), nil + }, + }) + + 
AddFuncLookup("songname", Info{ + Display: "Song Name", + Category: "song", + Description: "Title or name of a specific song used for identification and reference", + Example: "New Rules", + Output: "string", + Aliases: []string{ + "song title", + "track name", + "music title", + "song label", + }, + Keywords: []string{ + "song", "title", "name", "track", "music", + "single", "hit", "tune", "recording", + "composition", "melody", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return songName(f), nil + }, + }) + + AddFuncLookup("songartist", Info{ + Display: "Song Artist", + Category: "song", + Description: "The artist of maker of song", + Example: "Dua Lipa", + Output: "string", + Aliases: []string{ + "performer name", + "music act", + "band name", + "recording artist", + "song creator", + }, + Keywords: []string{ + "song", "artist", "singer", "musician", "composer", + "band", "producer", "vocalist", "group", "instrumentalist", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return songArtist(f), nil + }, + }) + + AddFuncLookup("songgenre", Info{ + Display: "Genre", + Category: "song", + Description: "Category that classifies song based on common themes, styles, and storytelling approaches", + Example: "Action", + Output: "string", + Aliases: []string{ + "music style", + "song category", + "musical classification", + "sound type", + "genre label", + }, + Keywords: []string{ + "song", "style", "category", "type", + "classification", "theme", "musical", "subgenre", "influence", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return songGenre(f), nil + }, + }) +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/sql.go b/vendor/github.com/brianvoe/gofakeit/v7/sql.go index cf767758..0d447c9e 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/sql.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/sql.go @@ -21,7 +21,7 @@ func sqlFunc(f *Faker, so *SQLOptions) (string, error) { if so.Table 
== "" { return "", errors.New("must provide table name to generate SQL") } - if so.Fields == nil || len(so.Fields) <= 0 { + if len(so.Fields) <= 0 { return "", errors.New(("must pass fields in order to generate SQL queries")) } if so.Count <= 0 { @@ -112,6 +112,12 @@ VALUES (2, 'Santino', 235.13, 40, '1964-07-07 22:25:40');`, Output: "string", ContentType: "application/sql", + Aliases: []string{ + "insert command", "database query", "sql statement", "record insert", "data query", + }, + Keywords: []string{ + "database", "insert", "command", "records", "table", "tuples", "rows", "data", "values", "query", + }, Params: []Param{ {Field: "table", Display: "Table", Type: "string", Description: "Name of the table to insert into"}, {Field: "count", Display: "Count", Type: "int", Default: "100", Description: "Number of inserts to generate"}, @@ -153,4 +159,5 @@ VALUES return sqlFunc(f, &so) }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/string.go b/vendor/github.com/brianvoe/gofakeit/v7/string.go index 5be37826..46bf032f 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/string.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/string.go @@ -124,6 +124,12 @@ func addStringLookup() { Description: "Character or symbol from the American Standard Code for Information Interchange (ASCII) character set", Example: "g", Output: "string", + Aliases: []string{ + "alphabet", "character", "text symbol", "ascii char", "alphabetical sign", + }, + Keywords: []string{ + "standard", "code", "information", "interchange", "set", "printable", "typography", "symbolic", "encoding", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return letter(f), nil }, @@ -135,6 +141,12 @@ func addStringLookup() { Description: "ASCII string with length N", Example: "gbRMaRxHki", Output: "string", + Aliases: []string{ + "random letters", "ascii string", "text sequence", "generated letters", "alphabetical string", + }, + Keywords: []string{ + "sequence", "multiple", 
"concatenated", "combined", "series", "generated", "batch", "collection", + }, Params: []Param{ {Field: "count", Display: "Count", Type: "uint", Description: "Number of digits to generate"}, }, @@ -154,6 +166,12 @@ func addStringLookup() { Description: "Speech sound produced with an open vocal tract", Example: "a", Output: "string", + Aliases: []string{ + "vocal sound", "speech letter", "phonetic vowel", "linguistic vowel", "spoken sound", + }, + Keywords: []string{ + "open", "e", "i", "o", "u", "phonetic", "linguistic", "articulation", "pronunciation", "syllable", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return vowel(f), nil }, @@ -165,6 +183,12 @@ func addStringLookup() { Description: "Numerical symbol used to represent numbers", Example: "0", Output: "string", + Aliases: []string{ + "number symbol", "numeric character", "decimal digit", "ascii number", "numerical sign", + }, + Keywords: []string{ + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "decimal", "base10", "notation", "numeric", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return digit(f), nil }, @@ -173,9 +197,15 @@ func addStringLookup() { AddFuncLookup("digitn", Info{ Display: "DigitN", Category: "string", - Description: "string of length N consisting of ASCII digits", + Description: "String of length N consisting of ASCII digits", Example: "0136459948", Output: "string", + Aliases: []string{ + "numeric string", "digit sequence", "number series", "generated digits", "ascii digits", + }, + Keywords: []string{ + "consisting", "multiple", "concatenated", "combined", "series", "numeric", "sequence", "continuous", "string", "digits", + }, Params: []Param{ {Field: "count", Display: "Count", Type: "uint", Description: "Number of digits to generate"}, }, @@ -195,6 +225,12 @@ func addStringLookup() { Description: "Replace # with random numerical values", Example: "(###)###-#### => (555)867-5309", Output: "string", + Aliases: []string{ + "hash 
replace", "number substitute", "pattern filler", "digit replacer", "placeholder numbers", + }, + Keywords: []string{ + "replace", "hash", "pound", "template", "placeholder", "format", "substitute", "pattern", "randomize", "masking", + }, Params: []Param{ {Field: "str", Display: "String", Type: "string", Description: "String value to replace #'s"}, }, @@ -214,6 +250,12 @@ func addStringLookup() { Description: "Replace ? with random generated letters", Example: "?????@??????.com => billy@mister.com", Output: "string", + Aliases: []string{ + "letter substitute", "pattern letters", "placeholder letters", "random letter filler", "character replacer", + }, + Keywords: []string{ + "replace", "question", "mark", "template", "placeholder", "format", "substitute", "pattern", "randomize", "masking", + }, Params: []Param{ {Field: "str", Display: "String", Type: "string", Description: "String value to replace ?'s"}, }, @@ -234,6 +276,12 @@ func addStringLookup() { Example: "hello,world,whats,up => whats,world,hello,up", Output: "[]string", ContentType: "application/json", + Aliases: []string{ + "array shuffle", "list randomize", "string reorder", "string mixer", "sequence shuffle", + }, + Keywords: []string{ + "collection", "list", "slice", "permutation", "randomized", "scrambled", "jumbled", "unordered", + }, Params: []Param{ {Field: "strs", Display: "Strings", Type: "[]string", Description: "Delimited separated strings"}, }, @@ -254,7 +302,13 @@ func addStringLookup() { Category: "string", Description: "Return a random string from a string array", Example: "hello,world,whats,up => world", - Output: "[]string", + Output: "string", + Aliases: []string{ + "string picker", "array choice", "string select", "random pick", "string chooser", + }, + Keywords: []string{ + "selection", "chosen", "picked", "random", "list", "slice", "array", "choice", "element", "option", + }, Params: []Param{ {Field: "strs", Display: "Strings", Type: "[]string", Description: "Delimited separated 
strings"}, }, @@ -267,4 +321,5 @@ func addStringLookup() { return randomString(f, strs), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/struct.go b/vendor/github.com/brianvoe/gofakeit/v7/struct.go index b2ac99a3..86026ae7 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/struct.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/struct.go @@ -9,6 +9,10 @@ import ( "time" ) +// RecursiveDepth controls the maximum recursion depth when populating structs. +// Increase if your data structures are deeply nested; decrease to be more conservative. +var RecursiveDepth = 10 + // Struct fills in exported fields of a struct with random data // based on the value of `fake` tag of exported fields // or with the result of a call to the Fake() method @@ -26,10 +30,10 @@ func Struct(v any) error { return structFunc(GlobalFaker, v) } func (f *Faker) Struct(v any) error { return structFunc(f, v) } func structFunc(f *Faker, v any) error { - return r(f, reflect.TypeOf(v), reflect.ValueOf(v), "", 0) + return r(f, reflect.TypeOf(v), reflect.ValueOf(v), "", 0, 0) } -func r(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { +func r(f *Faker, t reflect.Type, v reflect.Value, tag string, size int, depth int) error { // Handle special types if t.PkgPath() == "encoding/json" { @@ -50,9 +54,9 @@ func r(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { // Handle generic types switch t.Kind() { case reflect.Ptr: - return rPointer(f, t, v, tag, size) + return rPointer(f, t, v, tag, size, depth) case reflect.Struct: - return rStruct(f, t, v, tag) + return rStruct(f, t, v, tag, depth) case reflect.String: return rString(f, t, v, tag) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -64,9 +68,9 @@ func r(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { case reflect.Bool: return rBool(f, t, v, tag) case reflect.Array, reflect.Slice: - return rSlice(f, t, v, tag, size) + return 
rSlice(f, t, v, tag, size, depth) case reflect.Map: - return rMap(f, t, v, tag, size) + return rMap(f, t, v, tag, size, depth) } return nil @@ -123,7 +127,12 @@ func rCustom(f *Faker, v reflect.Value, tag string) error { return nil } -func rStruct(f *Faker, t reflect.Type, v reflect.Value, tag string) error { +func rStruct(f *Faker, t reflect.Type, v reflect.Value, tag string, depth int) error { + // Prevent recursing deeper than configured levels + if depth >= RecursiveDepth { + return nil + } + // Check if tag exists, if so run custom function if t.Name() != "" && tag != "" { return rCustom(f, v, tag) @@ -214,7 +223,7 @@ func rStruct(f *Faker, t reflect.Type, v reflect.Value, tag string) error { } // Recursively call r() to fill in the struct - err := r(f, elementT.Type, elementV, fakeTag, size) + err := r(f, elementT.Type, elementV, fakeTag, size, depth+1) if err != nil { return err } @@ -223,18 +232,23 @@ func rStruct(f *Faker, t reflect.Type, v reflect.Value, tag string) error { return nil } -func rPointer(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { +func rPointer(f *Faker, t reflect.Type, v reflect.Value, tag string, size int, depth int) error { elemT := t.Elem() + // Prevent recursing deeper than configured levels + if depth >= RecursiveDepth { + return nil + } + if v.IsNil() { nv := reflect.New(elemT).Elem() - err := r(f, elemT, nv, tag, size) + err := r(f, elemT, nv, tag, size, depth+1) if err != nil { return err } v.Set(nv.Addr()) } else { - err := r(f, elemT, v.Elem(), tag, size) + err := r(f, elemT, v.Elem(), tag, size, depth+1) if err != nil { return err } @@ -243,12 +257,17 @@ func rPointer(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) e return nil } -func rSlice(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { +func rSlice(f *Faker, t reflect.Type, v reflect.Value, tag string, size int, depth int) error { // If you cant even set it dont even try if !v.CanSet() { return 
errors.New("cannot set slice") } + // Prevent recursing deeper than configured levels + if depth >= RecursiveDepth { + return nil + } + // Check if tag exists, if so run custom function if t.Name() != "" && tag != "" { // Check to see if custom function works if not continue to normal loop of values @@ -284,7 +303,7 @@ func rSlice(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) err // Loop through the elements length and set based upon the index for i := 0; i < size; i++ { nv := reflect.New(elemT) - err := r(f, elemT, nv.Elem(), tag, ogSize) + err := r(f, elemT, nv.Elem(), tag, ogSize, depth+1) if err != nil { return err } @@ -300,18 +319,22 @@ func rSlice(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) err return nil } -func rMap(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error { +func rMap(f *Faker, t reflect.Type, v reflect.Value, tag string, size int, depth int) error { // If you cant even set it dont even try if !v.CanSet() { return errors.New("cannot set slice") } + // Prevent recursing deeper than configured levels + if depth >= RecursiveDepth { + return nil + } + // Check if tag exists, if so run custom function if tag != "" { return rCustom(f, v, tag) - } else if size > 0 { - // NOOP - } else if isFakeable(t) { + } else if isFakeable(t) && size <= 0 { + // Only call custom function if no fakesize is specified (size <= 0) value, err := callFake(f, v, reflect.Map) if err != nil { return err @@ -334,14 +357,14 @@ func rMap(f *Faker, t reflect.Type, v reflect.Value, tag string, size int) error for i := 0; i < newSize; i++ { // Create new key mapIndex := reflect.New(t.Key()) - err := r(f, t.Key(), mapIndex.Elem(), "", -1) + err := r(f, t.Key(), mapIndex.Elem(), "", -1, depth+1) if err != nil { return err } // Create new value mapValue := reflect.New(t.Elem()) - err = r(f, t.Elem(), mapValue.Elem(), "", -1) + err = r(f, t.Elem(), mapValue.Elem(), "", -1, depth+1) if err != nil { return err } @@ -586,7 
+609,7 @@ func rTime(f *Faker, t reflect.StructField, v reflect.Value, tag string) error { // Check to see if they are passing in a format to parse the time timeFormat, timeFormatOK := t.Tag.Lookup("format") if timeFormatOK { - timeFormat = javaDateFormatToGolangDateFormat(timeFormat) + timeFormat = javaDateTimeFormatToGolangFormat(timeFormat) } else { // If tag == "{date}" use time.RFC3339 // They are attempting to use the default date lookup diff --git a/vendor/github.com/brianvoe/gofakeit/v7/template.go b/vendor/github.com/brianvoe/gofakeit/v7/template.go index 5921fbf6..612d387d 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/template.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/template.go @@ -55,7 +55,9 @@ const templateMarkdown = ` *Author: {{FirstName}} {{LastName}}* -{{Paragraph 2 5 7 "\n\n"}} +{{Paragraph}} + +{{Paragraph}} ## Table of Contents - [Installation](#installation) @@ -119,7 +121,11 @@ Dear {{LastName}}, {{RandomString (SliceString "I trust this email finds you well." "I hope you're doing great." "Hoping this message reaches you in good spirits.")}} {{RandomString (SliceString "Wishing you a fantastic day!" "May your week be filled with joy." "Sending good vibes your way.")}} -{{Paragraph 3 5 10 "\n\n"}} +{{Paragraph}} + +{{Paragraph}} + +{{Paragraph}} {{RandomString (SliceString "I would appreciate your thoughts on" "I'm eager to hear your feedback on" "I'm curious to know what you think about")}} it. 
If you have a moment, please feel free to check out the project on {{RandomString (SliceString "GitHub" "GitLab" "Bitbucket")}} @@ -342,6 +348,12 @@ func addTemplateLookup() { Markus Moen`, Output: "string", ContentType: "text/plain", + Aliases: []string{ + "document template", "layout", "blueprint", "design pattern", "text template", "generator", "format schema", + }, + Keywords: []string{ + "generates", "format", "structure", "engine", "document", "pattern", "design", "syntax", "render", "compile", + }, Params: []Param{ {Field: "template", Display: "Template", Type: "string", Description: "Golang template to generate the document from"}, {Field: "data", Display: "Custom Data", Type: "string", Default: "", Optional: true, Description: "Custom data to pass to the template"}, @@ -397,6 +409,12 @@ print("purplesheep5 result:", "in progress") ## License MIT`, Output: "string", + Aliases: []string{ + "markup language", "readme format", "lightweight markup", "documentation style", "plain text format", "md file", "doc format", + }, + Keywords: []string{ + "markdown", "markup", "language", "formatting", "plain", "text", "documentation", "lightweight", "syntax", "rendering", "structure", "readme", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { template_result, err := templateFunc(templateMarkdown, templateFuncMap(f, nil), &MarkdownOptions{}) return string(template_result), err @@ -432,6 +450,19 @@ Milford Johnston jamelhaag@king.org (507)096-3058`, Output: "string", + Aliases: []string{ + "email body", + "email text", + "email message", + "message body", + "email content", + }, + Keywords: []string{ + "email", "body", "message", "content", + "subject", "salutation", "greeting", "closing", + "signature", "footer", "paragraph", "plaintext", + "correspondence", "communication", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { template_result, err := templateFunc(templateEmail, templateFuncMap(f, nil), &EmailOptions{}) return 
string(template_result), err diff --git a/vendor/github.com/brianvoe/gofakeit/v7/text.go b/vendor/github.com/brianvoe/gofakeit/v7/text.go new file mode 100644 index 00000000..be62eb6c --- /dev/null +++ b/vendor/github.com/brianvoe/gofakeit/v7/text.go @@ -0,0 +1,540 @@ +package gofakeit + +import ( + "bytes" + "errors" + "strings" + "unicode" +) + +// Comment will generate a random statement or remark expressing an opinion, observation, or reaction +func Comment() string { return comment(GlobalFaker) } + +// Comment will generate a random statement or remark expressing an opinion, observation, or reaction +func (f *Faker) Comment() string { return comment(f) } + +func comment(f *Faker) string { + template := getRandValue(f, []string{"text", "comment"}) + + // Generate using your faker + comment, err := generate(f, template) + if err != nil { + return template + } + + // ~16% chance to capitalize first letter + if prob, _ := weighted(f, []any{0, 1}, []float32{5, 1}); prob == 1 { + if len(comment) > 0 { + comment = strings.ToUpper(comment[:1]) + comment[1:] + } + } + + // ~15% chance to add punctuation + if prob, _ := weighted(f, []any{0, 1}, []float32{17, 3}); prob == 1 { + // Within punctuation: ? (4), ! (3), . (1) + switch p, _ := weighted(f, []any{"?", "!", "."}, []float32{4, 3, 1}); p { + case "?": + comment += "?" + case "!": + comment += "!" + case ".": + comment += "." 
+ } + } + + return comment +} + +// Phrase will return a random phrase +func Phrase() string { return phrase(GlobalFaker) } + +// Phrase will return a random phrase +func (f *Faker) Phrase() string { return phrase(f) } + +func phrase(f *Faker) string { return getRandValue(f, []string{"text", "phrase"}) } + +// PhraseNoun will return a random noun phrase +func PhraseNoun() string { return phraseNoun(GlobalFaker) } + +// PhraseNoun will return a random noun phrase +func (f *Faker) PhraseNoun() string { return phraseNoun(f) } + +func phraseNoun(f *Faker) string { + str := "" + + // You may also want to add an adjective to describe the noun + if boolFunc(f) { + str = adjectiveDescriptive(f) + " " + noun(f) + } else { + str = noun(f) + } + + // Add determiner from weighted list + prob, _ := weighted(f, []any{1, 2, 3}, []float32{2, 1.5, 1}) + switch prob { + case 1: + str = getArticle(str) + " " + str + case 2: + str = "the " + str + } + + return str +} + +// PhraseVerb will return a random preposition phrase +func PhraseVerb() string { return phraseVerb(GlobalFaker) } + +// PhraseVerb will return a random preposition phrase +func (f *Faker) PhraseVerb() string { return phraseVerb(f) } + +func phraseVerb(f *Faker) string { + // Put together a string builder + sb := []string{} + + // You may have an adverb phrase + if boolFunc(f) { + sb = append(sb, phraseAdverb(f)) + } + + // Lets add the primary verb + sb = append(sb, verbAction(f)) + + // You may have a noun phrase + if boolFunc(f) { + sb = append(sb, phraseNoun(f)) + } + + // You may have an adverb phrase + if boolFunc(f) { + sb = append(sb, phraseAdverb(f)) + + // You may also have a preposition phrase + if boolFunc(f) { + sb = append(sb, phrasePreposition(f)) + } + + // You may also hae an adverb phrase + if boolFunc(f) { + sb = append(sb, phraseAdverb(f)) + } + } + + return strings.Join(sb, " ") +} + +// PhraseAdverb will return a random adverb phrase +func PhraseAdverb() string { return phraseAdverb(GlobalFaker) } 
+ +// PhraseAdverb will return a random adverb phrase +func (f *Faker) PhraseAdverb() string { return phraseAdverb(f) } + +func phraseAdverb(f *Faker) string { + if boolFunc(f) { + return adverbDegree(f) + " " + adverbManner(f) + } + + return adverbManner(f) +} + +// PhrasePreposition will return a random preposition phrase +func PhrasePreposition() string { return phrasePreposition(GlobalFaker) } + +// PhrasePreposition will return a random preposition phrase +func (f *Faker) PhrasePreposition() string { return phrasePreposition(f) } + +func phrasePreposition(f *Faker) string { + return prepositionSimple(f) + " " + phraseNoun(f) +} + +// Sentence will generate a random sentence +// Deprecated: The wordCount parameter is ignored and will be removed in the next major release. +func Sentence(wordCount ...int) string { return sentence(GlobalFaker) } + +// Sentence will generate a random sentence +// Deprecated: The wordCount parameter is ignored and will be removed in the next major release. +func (f *Faker) Sentence(wordCount ...int) string { return sentence(f) } + +func sentence(f *Faker) string { + sentence, err := generate(f, getRandValue(f, []string{"text", "sentence"})) + if err != nil { + return "" + } + + // Capitalize the first letter + return strings.ToUpper(sentence[:1]) + sentence[1:] +} + +// Paragraph will generate a random paragraph +// Deprecated: The parameters are ignored and will be removed in the next major release. +func Paragraph(paragraphCount ...any) string { + return paragraph(GlobalFaker) +} + +// Paragraph will generate a random paragraph +// Deprecated: The parameters are ignored and will be removed in the next major release. 
+func (f *Faker) Paragraph(paragraphCount ...any) string { + return paragraph(f) +} + +func paragraph(f *Faker) string { + // generate 2-5 sentences + sentenceCount := f.Number(2, 5) + sentences := make([]string, sentenceCount) + for i := 0; i < sentenceCount; i++ { + sentences[i] = sentence(f) + } + + return strings.Join(sentences, " ") +} + +// Question will return a random question +func Question() string { + return question(GlobalFaker) +} + +// Question will return a random question +func (f *Faker) Question() string { + return question(f) +} + +func question(f *Faker) string { + question, err := generate(f, getRandValue(f, []string{"text", "question"})) + if err != nil { + return "" + } + + // Capitalize the first letter and add a question mark + return strings.ToUpper(question[:1]) + question[1:] + "?" +} + +// Quote will return a random quote from a random person +func Quote() string { return quote(GlobalFaker) } + +// Quote will return a random quote from a random person +func (f *Faker) Quote() string { return quote(f) } + +func quote(f *Faker) string { + quote, err := generate(f, getRandValue(f, []string{"text", "quote"})) + if err != nil { + return "" + } + + // Capitalize the first letter after the opening quote + return quote[:1] + strings.ToUpper(quote[1:2]) + quote[2:] +} + +// LoremIpsumSentence will generate a random sentence +func LoremIpsumSentence(wordCount int) string { + return loremIpsumSentence(GlobalFaker, wordCount) +} + +// LoremIpsumSentence will generate a random sentence +func (f *Faker) LoremIpsumSentence(wordCount int) string { + return loremIpsumSentence(f, wordCount) +} + +func loremIpsumSentence(f *Faker, wordCount int) string { + if wordCount <= 0 { + return "" + } + + sentence := bytes.Buffer{} + sentence.Grow(wordCount * 6) // estimate 6 bytes per word + + for i := 0; i < wordCount; i++ { + word := loremIpsumWord(f) + if i == 0 { + runes := []rune(word) + runes[0] = unicode.ToTitle(runes[0]) + word = string(runes) + } + 
sentence.WriteString(word) + if i < wordCount-1 { + sentence.WriteRune(' ') + } + } + sentence.WriteRune('.') + return sentence.String() +} + +// LoremIpsumParagraph will generate a random paragraphGenerator +func LoremIpsumParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { + return loremIpsumParagraph(GlobalFaker, paragraphCount, sentenceCount, wordCount, separator) +} + +// LoremIpsumParagraph will generate a random paragraphGenerator +func (f *Faker) LoremIpsumParagraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { + return loremIpsumParagraph(f, paragraphCount, sentenceCount, wordCount, separator) +} + +func loremIpsumParagraph(f *Faker, paragraphCount int, sentenceCount int, wordCount int, separator string) string { + if paragraphCount <= 0 || sentenceCount <= 0 || wordCount <= 0 { + return "" + } + + paragraphs := bytes.Buffer{} + paragraphs.Grow(paragraphCount * sentenceCount * wordCount * 6) // estimate 6 bytes per word + + for i := 0; i < paragraphCount; i++ { + for e := 0; e < sentenceCount; e++ { + paragraphs.WriteString(loremIpsumSentence(f, wordCount)) + if e < sentenceCount-1 { + paragraphs.WriteRune(' ') + } + } + + if i < paragraphCount-1 { + paragraphs.WriteString(separator) + } + } + + return paragraphs.String() +} + +func addTextLookup() { + AddFuncLookup("comment", Info{ + Display: "Comment", + Category: "text", + Description: "Statement or remark expressing an opinion, observation, or reaction", + Example: "add some a little bit team", + Output: "string", + Aliases: []string{ + "verbal statement", "expressed thought", "spoken remark", "communication element", "casual note", + }, + Keywords: []string{ + "opinion", "observation", "reaction", "response", "feedback", "critique", "interpretation", "perspective", "reflection", "discussion", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return comment(f), nil + }, + }) + + AddFuncLookup("phrase", 
Info{ + Display: "Phrase", + Category: "text", + Description: "A small group of words standing together", + Example: "time will tell", + Output: "string", + Aliases: []string{"word group", "language unit", "text element", "expression block"}, + Keywords: []string{"words", "group", "sentence", "text", "language", "grammar", "expression", "unit", "collection"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return phrase(f), nil + }, + }) + + AddFuncLookup("phrasenoun", Info{ + Display: "Noun Phrase", + Category: "text", + Description: "Phrase with a noun as its head, functions within sentence like a noun", + Example: "a tribe", + Output: "string", + Aliases: []string{"nominal phrase", "substantive element", "subject phrase", "object phrase"}, + Keywords: []string{"phrase", "noun", "grammar", "subject", "object", "head", "sentence", "nominal", "substantive", "entity"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return phraseNoun(f), nil + }, + }) + + AddFuncLookup("phraseverb", Info{ + Display: "Verb Phrase", + Category: "text", + Description: "Phrase that Consists of a verb and its modifiers, expressing an action or state", + Example: "a tribe", + Output: "string", + Aliases: []string{"predicate phrase", "verbal element", "action phrase", "state phrase"}, + Keywords: []string{"phrase", "verb", "grammar", "action", "state", "modifiers", "sentence", "predicate", "verbal", "behavior"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return phraseVerb(f), nil + }, + }) + + AddFuncLookup("phraseadverb", Info{ + Display: "Adverb Phrase", + Category: "text", + Description: "Phrase that modifies a verb, adjective, or another adverb, providing additional information.", + Example: "fully gladly", + Output: "string", + Aliases: []string{"adverbial phrase", "qualifier element", "modifier phrase", "description phrase"}, + Keywords: []string{"phrase", "adverb", "grammar", "modifier", "description", 
"information", "adverbial", "qualifier", "modification"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return phraseAdverb(f), nil + }, + }) + + AddFuncLookup("phrasepreposition", Info{ + Display: "Preposition Phrase", + Category: "text", + Description: "Phrase starting with a preposition, showing relation between elements in a sentence.", + Example: "out the black thing", + Output: "string", + Aliases: []string{"prepositional phrase", "relational element", "connection phrase", "grammar bridge"}, + Keywords: []string{"phrase", "preposition", "grammar", "relation", "connection", "sentence", "prepositional", "relational", "linking"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return phrasePreposition(f), nil + }, + }) + + AddFuncLookup("sentence", Info{ + Display: "Sentence", + Category: "text", + Description: "Set of words expressing a statement, question, exclamation, or command", + Example: "Guide person with kind affordances.", + Output: "string", + Aliases: []string{"complete thought", "grammatical unit", "word group", "linguistic element"}, + Keywords: []string{"complete", "thought", "grammatical", "unit", "word", "group", "expression", "clause", "utterance"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return sentence(f), nil + }, + }) + + AddFuncLookup("paragraph", Info{ + Display: "Paragraph", + Category: "text", + Description: "Distinct section of writing covering a single theme, composed of multiple sentences", + Example: "Protect the place under grumpy load. Decompose work into smaller group. 
Ruthlessly remove dead work.", + Output: "string", + Aliases: []string{"text block", "writing section", "thematic unit", "content block"}, + Keywords: []string{"text", "block", "writing", "section", "theme", "sentences", "composition", "distinct", "passage", "content"}, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return paragraph(f), nil + }, + }) + + AddFuncLookup("question", Info{ + Display: "Question", + Category: "text", + Description: "Statement formulated to inquire or seek clarification", + Example: "Roof chia echo?", + Output: "string", + Aliases: []string{ + "interrogative sentence", + "information request", + "asking phrase", + "query prompt", + "clarifying ask", + }, + Keywords: []string{ + "inquiry", "clarification", "interrogative", + "ask", "who", "what", "when", "where", "why", "how", "mark", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return question(f), nil + }, + }) + + AddFuncLookup("quote", Info{ + Display: "Quote", + Category: "text", + Description: "Direct repetition of someone else's words", + Example: `"Does orange help the tissue or distract it"`, + Output: "string", + Aliases: []string{ + "direct speech", + "verbatim line", + "cited passage", + "attributed text", + "pulled excerpt", + }, + Keywords: []string{ + "quotation", "citation", "reference", "excerpt", + "epigraph", "saying", "maxim", "attribution", "blockquote", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return quote(f), nil + }, + }) + + AddFuncLookup("loremipsumsentence", Info{ + Display: "Lorem Ipsum Sentence", + Category: "text", + Description: "Sentence of the Lorem Ipsum placeholder text used in design and publishing", + Example: "Quia quae repellat consequatur quidem.", + Output: "string", + Aliases: []string{ + "lorem sentence", + "ipsum sentence", + "placeholder sentence", + "latin sentence", + }, + Keywords: []string{ + "lorem", "ipsum", "sentence", "placeholder", + "latin", "dummy", 
"filler", "text", + "typography", "mockup", + }, + Params: []Param{ + {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + wordCount, err := info.GetInt(m, "wordcount") + if err != nil { + return nil, err + } + if wordCount <= 0 || wordCount > 50 { + return nil, errors.New("invalid word count, must be greater than 0, less than 50") + } + + return loremIpsumSentence(f, wordCount), nil + }, + }) + + AddFuncLookup("loremipsumparagraph", Info{ + Display: "Lorem Ipsum Paragraph", + Category: "text", + Description: "Paragraph of the Lorem Ipsum placeholder text used in design and publishing", + Example: `Quia quae repellat consequatur quidem nisi quo qui voluptatum accusantium quisquam amet. Quas et ut non dolorem ipsam aut enim assumenda mollitia harum ut. Dicta similique veniam nulla voluptas at excepturi non ad maxime at non. Eaque hic repellat praesentium voluptatem qui consequuntur dolor iusto autem velit aut. Fugit tempore exercitationem harum consequatur voluptatum modi minima aut eaque et et. + +Aut ea voluptatem dignissimos expedita odit tempore quod aut beatae ipsam iste. Minus voluptatibus dolorem maiores eius sed nihil vel enim odio voluptatem accusamus. Natus quibusdam temporibus tenetur cumque sint necessitatibus dolorem ex ducimus iusto ex. Voluptatem neque dicta explicabo officiis et ducimus sit ut ut praesentium pariatur. Illum molestias nisi at dolore ut voluptatem accusantium et fugiat et ut. + +Explicabo incidunt reprehenderit non quia dignissimos recusandae vitae soluta quia et quia. Aut veniam voluptas consequatur placeat sapiente non eveniet voluptatibus magni velit eum. Nobis vel repellendus sed est qui autem laudantium quidem quam ullam consequatur. Aut iusto ut commodi similique quae voluptatem atque qui fugiat eum aut. 
Quis distinctio consequatur voluptatem vel aliquid aut laborum facere officiis iure tempora.`, + Output: "string", + Aliases: []string{ + "lorem paragraph", + "ipsum paragraph", + "placeholder paragraph", + "latin paragraph", + }, + Keywords: []string{ + "lorem", "ipsum", "paragraph", "placeholder", + "latin", "dummy", "filler", "text", + "typography", "mockup", + }, + Params: []Param{ + {Field: "paragraphcount", Display: "Paragraph Count", Type: "int", Default: "2", Description: "Number of paragraphs"}, + {Field: "sentencecount", Display: "Sentence Count", Type: "int", Default: "2", Description: "Number of sentences in a paragraph"}, + {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, + {Field: "paragraphseparator", Display: "Paragraph Separator", Type: "string", Default: "
", Description: "String value to add between paragraphs"}, + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + paragraphCount, err := info.GetInt(m, "paragraphcount") + if err != nil { + return nil, err + } + if paragraphCount <= 0 || paragraphCount > 20 { + return nil, errors.New("invalid paragraph count, must be greater than 0, less than 20") + } + + sentenceCount, err := info.GetInt(m, "sentencecount") + if err != nil { + return nil, err + } + if sentenceCount <= 0 || sentenceCount > 20 { + return nil, errors.New("invalid sentence count, must be greater than 0, less than 20") + } + + wordCount, err := info.GetInt(m, "wordcount") + if err != nil { + return nil, err + } + if wordCount <= 0 || wordCount > 50 { + return nil, errors.New("invalid word count, must be greater than 0, less than 50") + } + + paragraphSeparator, err := info.GetString(m, "paragraphseparator") + if err != nil { + return nil, err + } + + return loremIpsumParagraph(f, paragraphCount, sentenceCount, wordCount, paragraphSeparator), nil + }, + }) +} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/weighted.go b/vendor/github.com/brianvoe/gofakeit/v7/weighted.go index cedfe42d..95d4cade 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/weighted.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/weighted.go @@ -80,6 +80,12 @@ func addWeightedLookup() { Description: "Randomly select a given option based upon an equal amount of weights", Example: "[hello, 2, 6.9],[1, 2, 3] => 6.9", Output: "any", + Aliases: []string{ + "weighted choice", "probabilistic pick", "random weight", "distribution choice", "chance selection", "ratio selection", "stochastic option", + }, + Keywords: []string{ + "randomly", "select", "probability", "distribution", "likelihood", "chance", "statistical", "outcome", "bias", "ratio", + }, Params: []Param{ {Field: "options", Display: "Options", Type: "[]string", Description: "Array of any values"}, {Field: "weights", Display: "Weights", Type: "[]float", 
Description: "Array of weights"}, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_adjective.go b/vendor/github.com/brianvoe/gofakeit/v7/word_adjective.go index 01efb032..f9023269 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_adjective.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_adjective.go @@ -96,6 +96,17 @@ func addWordAdjectiveLookup() { Description: "Word describing or modifying a noun", Example: "genuine", Output: "string", + Aliases: []string{ + "descriptor term", + "qualifying modifier", + "attribute marker", + "descriptive label", + "noun qualifier", + }, + Keywords: []string{ + "noun", "speech", "quality", "attribute", + "characteristic", "property", "trait", "descriptive", "modifier", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjective(f), nil }, @@ -107,6 +118,17 @@ func addWordAdjectiveLookup() { Description: "Adjective that provides detailed characteristics about a noun", Example: "brave", Output: "string", + Aliases: []string{ + "qualitative adjective", + "detail-rich modifier", + "characterizing term", + "specific descriptor", + "noun enhancer", + }, + Keywords: []string{ + "adjective", "word", "describing", "modifying", "attribute", + "property", "trait", "feature", "aspect", "detailed", "characteristics", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectiveDescriptive(f), nil }, @@ -118,6 +140,17 @@ func addWordAdjectiveLookup() { Description: "Adjective that indicates the quantity or amount of something", Example: "a little", Output: "string", + Aliases: []string{ + "numeric descriptor", + "cardinal qualifier", + "quantifier adjective", + "how many indicator", + "magnitude marker", + }, + Keywords: []string{ + "adjective", "quantitative", "word", "describing", "modifying", + "count", "volume", "extent", "degree", "magnitude", "quantity", "amount", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectiveQuantitative(f), 
nil }, @@ -129,6 +162,18 @@ func addWordAdjectiveLookup() { Description: "Adjective derived from a proper noun, often used to describe nationality or origin", Example: "Afghan", Output: "string", + Aliases: []string{ + "nationality adjective", + "eponym-derived", + "proper-noun based", + "demonym adjective", + "origin descriptor", + }, + Keywords: []string{ + "adjective", "noun", "word", "describing", + "cultural", "regional", "ethnic", "linguistic", "heritage", + "proper", "nationality", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectiveProper(f), nil }, @@ -140,6 +185,19 @@ func addWordAdjectiveLookup() { Description: "Adjective used to point out specific things", Example: "this", Output: "string", + Aliases: []string{ + "demonstrative adjective", + "pointing adjective", + "deictic adjective", + "proximal distal adjective", + "reference adjective", + }, + Keywords: []string{ + "adjective", "demonstrative", "deictic", + "this", "that", "these", "those", + "proximal", "distal", "near", "far", + "pointer", "reference", "specific", "grammar", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectiveDemonstrative(f), nil }, @@ -151,6 +209,18 @@ func addWordAdjectiveLookup() { Description: "Adjective indicating ownership or possession", Example: "my", Output: "string", + Aliases: []string{ + "ownership adjective", + "owners descriptor", + "possessive determiner", + "belonging indicator", + "proprietary modifier", + }, + Keywords: []string{ + "adjective", "word", "grammar", + "my", "your", "his", "her", "its", "our", "their", + "belong", "possessive", "ownership", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectivePossessive(f), nil }, @@ -162,6 +232,17 @@ func addWordAdjectiveLookup() { Description: "Adjective used to ask questions", Example: "what", Output: "string", + Aliases: []string{ + "interrogative adjective", + "question word", + "asking adjective", + "inquiry 
word", + "grammar adjective", + }, + Keywords: []string{ + "adjective", "word", "grammar", "what", "which", "whose", + "question", "inquiry", "interrogation", "interrogative", "ask", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectiveInterrogative(f), nil }, @@ -173,8 +254,21 @@ func addWordAdjectiveLookup() { Description: "Adjective describing a non-specific noun", Example: "few", Output: "string", + Aliases: []string{ + "unspecified adjective", + "quantifier-like", + "noncount marker", + "broad determiner", + "approximate amount", + }, + Keywords: []string{ + "adjective", "noun", "word", "grammar", + "some", "any", "many", "few", "several", "various", "certain", + "indefinite", "non-specific", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adjectiveIndefinite(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_adverb.go b/vendor/github.com/brianvoe/gofakeit/v7/word_adverb.go index 4cafb8a8..fe5b546b 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_adverb.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_adverb.go @@ -90,6 +90,12 @@ func addWordAdverbLookup() { Description: "Word that modifies verbs, adjectives, or other adverbs", Example: "smoothly", Output: "string", + Aliases: []string{ + "modifier", "descriptive word", "language part", "expression word", "qualifier", + }, + Keywords: []string{ + "intensity", "manner", "degree", "place", "time", "frequency", "extent", "emphasis", "usage", "context", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverb(f), nil }, @@ -101,6 +107,12 @@ func addWordAdverbLookup() { Description: "Adverb that describes how an action is performed", Example: "stupidly", Output: "string", + Aliases: []string{ + "manner word", "action style", "performance word", "descriptive term", "behavior word", + }, + Keywords: []string{ + "style", "process", "mode", "technique", "behavior", "attitude", "fashion", 
"pattern", "characteristic", "approach", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbManner(f), nil }, @@ -112,6 +124,12 @@ func addWordAdverbLookup() { Description: "Adverb that indicates the degree or intensity of an action or adjective", Example: "intensely", Output: "string", + Aliases: []string{ + "degree word", "intensity word", "level word", "strength word", "extent word", + }, + Keywords: []string{ + "measure", "force", "strength", "scope", "magnitude", "gradation", "amount", "power", "amplification", "range", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbDegree(f), nil }, @@ -123,6 +141,12 @@ func addWordAdverbLookup() { Description: "Adverb that indicates the location or direction of an action", Example: "east", Output: "string", + Aliases: []string{ + "place word", "location word", "direction word", "position word", "movement word", + }, + Keywords: []string{ + "orientation", "destination", "area", "region", "spot", "placement", "site", "territory", "geography", "setting", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbPlace(f), nil }, @@ -134,6 +158,12 @@ func addWordAdverbLookup() { Description: "Adverb that specifies the exact time an action occurs", Example: "now", Output: "string", + Aliases: []string{ + "time word", "definite time", "exact time", "moment word", "specific time", + }, + Keywords: []string{ + "precise", "instant", "point", "schedule", "fixed", "timestamp", "occasion", "momentary", "calendar", "chronology", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbTimeDefinite(f), nil }, @@ -145,6 +175,12 @@ func addWordAdverbLookup() { Description: "Adverb that gives a general or unspecified time frame", Example: "already", Output: "string", + Aliases: []string{ + "time word", "indefinite time", "general time", "approximate time", "vague time", + }, + Keywords: []string{ + "uncertain", "broad", 
"loose", "non-specific", "undefined", "imprecise", "approximation", "unsure", "flexible", "open-ended", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbTimeIndefinite(f), nil }, @@ -156,6 +192,12 @@ func addWordAdverbLookup() { Description: "Adverb that specifies how often an action occurs with a clear frequency", Example: "hourly", Output: "string", + Aliases: []string{ + "frequency word", "repetition word", "regular word", "interval word", "scheduled word", + }, + Keywords: []string{ + "interval", "regular", "pattern", "routine", "cycle", "repetition", "rate", "periodic", "consistency", "predictable", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbFrequencyDefinite(f), nil }, @@ -167,8 +209,15 @@ func addWordAdverbLookup() { Description: "Adverb that specifies how often an action occurs without specifying a particular frequency", Example: "occasionally", Output: "string", + Aliases: []string{ + "frequency word", "indefinite frequency", "irregular word", "sporadic word", "recurring word", + }, + Keywords: []string{ + "uncertain", "sporadic", "occasional", "irregular", "unfixed", "varying", "undetermined", "fluctuating", "approximate", "inconsistent", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return adverbFrequencyIndefinite(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_comment.go b/vendor/github.com/brianvoe/gofakeit/v7/word_comment.go deleted file mode 100644 index f73d1f03..00000000 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_comment.go +++ /dev/null @@ -1,72 +0,0 @@ -package gofakeit - -import ( - "strings" -) - -// Comment will generate a random statement or remark expressing an opinion, observation, or reaction -func Comment() string { return comment(GlobalFaker) } - -// Comment will generate a random statement or remark expressing an opinion, observation, or reaction -func (f *Faker) Comment() string { return comment(f) } - 
-func comment(f *Faker) string { - structures := [][]string{ - {"interjection", "adjective", "noun", "verb", "adverb"}, - {"noun", "verb", "preposition", "determiner", "adjective", "noun"}, - {"noun", "verb", "adverb"}, - {"adjective", "noun", "verb"}, - {"noun", "verb", "preposition", "noun"}, - } - - // Randomly select a structure - structure := structures[number(f, 0, len(structures)-1)] - - // Build the sentence - var commentParts []string - for _, wordType := range structure { - switch wordType { - case "noun": - commentParts = append(commentParts, noun(f)) - case "verb": - commentParts = append(commentParts, verb(f)) - case "adjective": - commentParts = append(commentParts, adjective(f)) - case "adverb": - commentParts = append(commentParts, adverb(f)) - case "interjection": - commentParts = append(commentParts, interjection(f)) - case "preposition": - commentParts = append(commentParts, preposition(f)) - case "determiner": - commentParts = append(commentParts, nounDeterminer(f)) - default: - // Should never hit - panic("Invalid word type") - } - } - - // Combine the words into a sentence - sentence := strings.Join(commentParts, " ") - - // Capitalize the first letter - sentence = title(sentence) - - // Add a period to the end of the sentence - sentence = sentence + "." 
- - return sentence -} - -func addWordCommentLookup() { - AddFuncLookup("comment", Info{ - Display: "Comment", - Category: "word", - Description: "Statement or remark expressing an opinion, observation, or reaction", - Example: "wow", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return interjection(f), nil - }, - }) -} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_connective.go b/vendor/github.com/brianvoe/gofakeit/v7/word_connective.go index 92dc025f..48304511 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_connective.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_connective.go @@ -86,6 +86,8 @@ func addWordConnectiveLookup() { Description: "Word used to connect words or sentences", Example: "such as", Output: "string", + Aliases: []string{"joining element", "grammar connector", "sentence bridge", "word linker"}, + Keywords: []string{"word", "connect", "sentence", "grammar", "used", "conjunction", "link", "joining"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connective(f), nil }, @@ -97,6 +99,8 @@ func addWordConnectiveLookup() { Description: "Connective word used to indicate a temporal relationship between events or actions", Example: "finally", Output: "string", + Aliases: []string{"temporal connector", "time relationship", "chronological link", "sequence element"}, + Keywords: []string{"connective", "time", "temporal", "relationship", "events", "grammar", "actions", "chronological", "sequence", "timing"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connectiveTime(f), nil }, @@ -108,6 +112,8 @@ func addWordConnectiveLookup() { Description: "Connective word used to indicate a comparison between two or more things", Example: "in addition", Output: "string", + Aliases: []string{"comparison connector", "contrast element", "similarity link", "grammar bridge"}, + Keywords: []string{"connective", "comparative", "comparison", "things", 
"grammar", "indicate", "contrast", "similarity", "relative"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connectiveComparative(f), nil }, @@ -119,6 +125,8 @@ func addWordConnectiveLookup() { Description: "Connective word used to express dissatisfaction or complaints about a situation", Example: "besides", Output: "string", + Aliases: []string{"objection connector", "criticism element", "dissatisfaction link", "grammar bridge"}, + Keywords: []string{"connective", "complaint", "dissatisfaction", "situation", "grammar", "express", "objection", "criticism", "negative"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connectiveComplaint(f), nil }, @@ -130,6 +138,8 @@ func addWordConnectiveLookup() { Description: "Connective word used to list or enumerate items or examples", Example: "firstly", Output: "string", + Aliases: []string{"enumeration connector", "sequence element", "order link", "grammar bridge"}, + Keywords: []string{"connective", "listing", "enumerate", "items", "examples", "grammar", "list", "sequence", "order", "numbered"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connectiveListing(f), nil }, @@ -141,6 +151,8 @@ func addWordConnectiveLookup() { Description: "Connective word used to indicate a cause-and-effect relationship between events or actions", Example: "an outcome of", Output: "string", + Aliases: []string{"causal connector", "effect relationship", "consequence link", "grammar bridge"}, + Keywords: []string{"connective", "casual", "cause", "effect", "relationship", "grammar", "events", "actions", "causal", "consequence", "result"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connectiveCasual(f), nil }, @@ -152,6 +164,8 @@ func addWordConnectiveLookup() { Description: "Connective word used to provide examples or illustrations of a concept or idea", Example: "then", Output: "string", + Aliases: []string{"example connector", "illustration 
element", "instance link", "grammar bridge"}, + Keywords: []string{"connective", "examplify", "examples", "illustrations", "concept", "grammar", "provide", "instance", "demonstration", "sample"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return connectiveExamplify(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_general.go b/vendor/github.com/brianvoe/gofakeit/v7/word_general.go index 548ef125..fcf7acd1 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_general.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_general.go @@ -30,6 +30,8 @@ func addWordGeneralLookup() { Description: "Basic unit of language representing a concept or thing, consisting of letters and having meaning", Example: "man", Output: "string", + Aliases: []string{"language unit", "speech element", "writing component", "lexical item"}, + Keywords: []string{"basic", "unit", "language", "concept", "letters", "meaning", "representing", "lexeme", "vocabulary"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return word(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_grammar.go b/vendor/github.com/brianvoe/gofakeit/v7/word_grammar.go deleted file mode 100644 index b3690bee..00000000 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_grammar.go +++ /dev/null @@ -1,34 +0,0 @@ -package gofakeit - -import ( - "unicode" -) - -// SentenceSimple will generate a random simple sentence -func SentenceSimple() string { return sentenceSimple(GlobalFaker) } - -// SentenceSimple will generate a random simple sentence -func (f *Faker) SentenceSimple() string { return sentenceSimple(f) } - -func sentenceSimple(f *Faker) string { - // simple sentence consists of a noun phrase and a verb phrase - str := phraseNoun(f) + " " + phraseVerb(f) + "." 
- - // capitalize the first letter - strR := []rune(str) - strR[0] = unicode.ToUpper(strR[0]) - return string(strR) -} - -func addWordGrammerLookup() { - AddFuncLookup("sentencesimple", Info{ - Display: "Simple Sentence", - Category: "word", - Description: "Group of words that expresses a complete thought", - Example: "A tribe fly the lemony kitchen.", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return sentenceSimple(f), nil - }, - }) -} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_misc.go b/vendor/github.com/brianvoe/gofakeit/v7/word_misc.go index 58c8799b..8e00e7e7 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_misc.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_misc.go @@ -8,6 +8,14 @@ func (f *Faker) Interjection() string { return interjection(f) } func interjection(f *Faker) string { return getRandValue(f, []string{"word", "interjection"}) } +// LoremIpsumWord will generate a random word +func LoremIpsumWord() string { return loremIpsumWord(GlobalFaker) } + +// LoremIpsumWord will generate a random word +func (f *Faker) LoremIpsumWord() string { return loremIpsumWord(f) } + +func loremIpsumWord(f *Faker) string { return getRandValue(f, []string{"lorem", "word"}) } + func addWordMiscLookup() { AddFuncLookup("interjection", Info{ Display: "Interjection", @@ -15,8 +23,32 @@ func addWordMiscLookup() { Description: "Word expressing emotion", Example: "wow", Output: "string", + Aliases: []string{"emotional expression", "feeling word", "reaction term", "exclamation element"}, + Keywords: []string{"emotion", "word", "expression", "feeling", "reaction", "exclamation", "utterance", "ejaculation", "emotional"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return interjection(f), nil }, }) + + AddFuncLookup("loremipsumword", Info{ + Display: "Lorem Ipsum Word", + Category: "word", + Description: "Word of the Lorem Ipsum placeholder text used in design and publishing", + Example: 
"quia", + Output: "string", + Aliases: []string{ + "lorem word", + "ipsum word", + "placeholder word", + "latin word", + }, + Keywords: []string{ + "lorem", "ipsum", "word", "placeholder", + "latin", "dummy", "filler", "text", + "typography", "mockup", + }, + Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { + return loremIpsumWord(f), nil + }, + }) } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_noun.go b/vendor/github.com/brianvoe/gofakeit/v7/word_noun.go index ad7d388c..2e433291 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_noun.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_noun.go @@ -125,6 +125,12 @@ func addWordNounLookup() { Description: "Person, place, thing, or idea, named or referred to in a sentence", Example: "aunt", Output: "string", + Aliases: []string{ + "random noun", "grammar noun", "word type", "part speech", "naming word", "lexical noun", "nominal word", + }, + Keywords: []string{ + "person", "place", "idea", "sentence", "grammar", "named", "referred", "subject", "object", "entity", "concept", "term", "substantive", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return noun(f), nil }, @@ -136,6 +142,12 @@ func addWordNounLookup() { Description: "General name for people, places, or things, not specific or unique", Example: "part", Output: "string", + Aliases: []string{ + "common noun", "general noun", "generic name", "basic noun", "ordinary noun", "regular noun", "everyday noun", + }, + Keywords: []string{ + "common", "general", "name", "people", "places", "generic", "basic", "ordinary", "standard", "typical", "regular", "normal", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounCommon(f), nil }, @@ -147,6 +159,12 @@ func addWordNounLookup() { Description: "Names for physical entities experienced through senses like sight, touch, smell, or taste", Example: "snowman", Output: "string", + Aliases: []string{ + "concrete noun", "physical noun", 
"tangible noun", "material noun", "sensory noun", "real noun", "perceptible noun", + }, + Keywords: []string{ + "concrete", "physical", "entities", "senses", "sight", "touch", "smell", "taste", "tangible", "material", "solid", "real", "visible", "touchable", "observable", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounConcrete(f), nil }, @@ -158,6 +176,12 @@ func addWordNounLookup() { Description: "Ideas, qualities, or states that cannot be perceived with the five senses", Example: "confusion", Output: "string", + Aliases: []string{ + "abstract noun", "concept noun", "idea noun", "intangible noun", "mental noun", "notional noun", "theoretical noun", + }, + Keywords: []string{ + "abstract", "ideas", "qualities", "states", "senses", "concept", "intangible", "mental", "theoretical", "emotional", "spiritual", "intellectual", "philosophical", "metaphysical", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounAbstract(f), nil }, @@ -169,6 +193,12 @@ func addWordNounLookup() { Description: "Group of people or things regarded as a unit", Example: "body", Output: "string", + Aliases: []string{ + "collective noun", "group noun", "people group", "crowd noun", "assembly noun", "community noun", "societal noun", + }, + Keywords: []string{ + "collective", "people", "group", "unit", "regarded", "crowd", "assembly", "gathering", "team", "committee", "audience", "class", "family", "society", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounCollectivePeople(f), nil }, @@ -180,6 +210,12 @@ func addWordNounLookup() { Description: "Group of animals, like a 'pack' of wolves or a 'flock' of birds", Example: "party", Output: "string", + Aliases: []string{ + "animal collective", "pack noun", "flock noun", "herd noun", "swarm noun", "colony noun", "pride noun", + }, + Keywords: []string{ + "collective", "animal", "group", "pack", "flock", "animals", "herd", "swarm", "pride", "school", 
"colony", "pod", "gaggle", "murder", "exaltation", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounCollectiveAnimal(f), nil }, @@ -191,6 +227,12 @@ func addWordNounLookup() { Description: "Group of objects or items, such as a 'bundle' of sticks or a 'cluster' of grapes", Example: "hand", Output: "string", + Aliases: []string{ + "object collective", "bundle noun", "cluster noun", "collection noun", "set noun", "batch noun", "pile noun", + }, + Keywords: []string{ + "collective", "thing", "group", "objects", "items", "bundle", "cluster", "collection", "set", "batch", "stack", "pile", "heap", "bunch", "array", "assortment", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounCollectiveThing(f), nil }, @@ -202,6 +244,12 @@ func addWordNounLookup() { Description: "Items that can be counted individually", Example: "neck", Output: "string", + Aliases: []string{ + "countable noun", "count noun", "discrete item", "enumerable noun", "plural noun", "numerical noun", "measurable noun", + }, + Keywords: []string{ + "countable", "items", "counted", "individually", "discrete", "enumerable", "plural", "many", "few", "number", "objects", "things", "units", "pieces", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounCountable(f), nil }, @@ -213,6 +261,12 @@ func addWordNounLookup() { Description: "Items that can't be counted individually", Example: "seafood", Output: "string", + Aliases: []string{ + "uncountable noun", "mass noun", "non-count noun", "bulk noun", "substance noun", "continuous noun", "material noun", + }, + Keywords: []string{ + "uncountable", "items", "counted", "individually", "mass", "bulk", "substance", "material", "liquid", "powder", "grain", "continuous", "indivisible", "measurement", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounUncountable(f), nil }, @@ -224,6 +278,12 @@ func addWordNounLookup() { Description: "Specific name for a 
particular person, place, or organization", Example: "John", Output: "string", + Aliases: []string{ + "proper noun", "specific name", "person name", "place name", "organization name", "capitalized noun", "unique name", + }, + Keywords: []string{ + "proper", "specific", "name", "person", "place", "organization", "capitalized", "title", "brand", "company", "city", "country", "individual", "entity", "designation", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounProper(f), nil }, @@ -235,8 +295,15 @@ func addWordNounLookup() { Description: "Word that introduces a noun and identifies it as a noun", Example: "your", Output: "string", + Aliases: []string{ + "determiner word", "article word", "noun introducer", "specifier word", "modifier word", "defining word", "introductory word", + }, + Keywords: []string{ + "determiner", "word", "introduces", "identifies", "article", "specifier", "modifier", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return nounDeterminer(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_phrase.go b/vendor/github.com/brianvoe/gofakeit/v7/word_phrase.go deleted file mode 100644 index 68ff015a..00000000 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_phrase.go +++ /dev/null @@ -1,162 +0,0 @@ -package gofakeit - -import ( - "strings" -) - -// Phrase will return a random phrase -func Phrase() string { return phrase(GlobalFaker) } - -// Phrase will return a random phrase -func (f *Faker) Phrase() string { return phrase(f) } - -func phrase(f *Faker) string { return getRandValue(f, []string{"sentence", "phrase"}) } - -// PhraseNoun will return a random noun phrase -func PhraseNoun() string { return phraseNoun(GlobalFaker) } - -// PhraseNoun will return a random noun phrase -func (f *Faker) PhraseNoun() string { return phraseNoun(f) } - -func phraseNoun(f *Faker) string { - str := "" - - // You may also want to add an adjective to describe the noun - if boolFunc(f) { - str 
= adjectiveDescriptive(f) + " " + noun(f) - } else { - str = noun(f) - } - - // Add determiner from weighted list - prob, _ := weighted(f, []any{1, 2, 3}, []float32{2, 1.5, 1}) - if prob == 1 { - str = getArticle(str) + " " + str - } else if prob == 2 { - str = "the " + str - } - - return str -} - -// PhraseVerb will return a random preposition phrase -func PhraseVerb() string { return phraseVerb(GlobalFaker) } - -// PhraseVerb will return a random preposition phrase -func (f *Faker) PhraseVerb() string { return phraseVerb(f) } - -func phraseVerb(f *Faker) string { - // Put together a string builder - sb := []string{} - - // You may have an adverb phrase - if boolFunc(f) { - sb = append(sb, phraseAdverb(f)) - } - - // Lets add the primary verb - sb = append(sb, verbAction(f)) - - // You may have a noun phrase - if boolFunc(f) { - sb = append(sb, phraseNoun(f)) - } - - // You may have an adverb phrase - if boolFunc(f) { - sb = append(sb, phraseAdverb(f)) - - // You may also have a preposition phrase - if boolFunc(f) { - sb = append(sb, phrasePreposition(f)) - } - - // You may also hae an adverb phrase - if boolFunc(f) { - sb = append(sb, phraseAdverb(f)) - } - } - - return strings.Join(sb, " ") -} - -// PhraseAdverb will return a random adverb phrase -func PhraseAdverb() string { return phraseAdverb(GlobalFaker) } - -// PhraseAdverb will return a random adverb phrase -func (f *Faker) PhraseAdverb() string { return phraseAdverb(f) } - -func phraseAdverb(f *Faker) string { - if boolFunc(f) { - return adverbDegree(f) + " " + adverbManner(f) - } - - return adverbManner(f) -} - -// PhrasePreposition will return a random preposition phrase -func PhrasePreposition() string { return phrasePreposition(GlobalFaker) } - -// PhrasePreposition will return a random preposition phrase -func (f *Faker) PhrasePreposition() string { return phrasePreposition(f) } - -func phrasePreposition(f *Faker) string { - return prepositionSimple(f) + " " + phraseNoun(f) -} - -func 
addWordPhraseLookup() { - AddFuncLookup("phrase", Info{ - Display: "Phrase", - Category: "word", - Description: "A small group of words standing together", - Example: "time will tell", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phrase(f), nil - }, - }) - - AddFuncLookup("phrasenoun", Info{ - Display: "Noun Phrase", - Category: "word", - Description: "Phrase with a noun as its head, functions within sentence like a noun", - Example: "a tribe", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phraseNoun(f), nil - }, - }) - - AddFuncLookup("phraseverb", Info{ - Display: "Verb Phrase", - Category: "word", - Description: "Phrase that Consists of a verb and its modifiers, expressing an action or state", - Example: "a tribe", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phraseVerb(f), nil - }, - }) - - AddFuncLookup("phraseadverb", Info{ - Display: "Adverb Phrase", - Category: "word", - Description: "Phrase that modifies a verb, adjective, or another adverb, providing additional information.", - Example: "fully gladly", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phraseAdverb(f), nil - }, - }) - - AddFuncLookup("phrasepreposition", Info{ - Display: "Preposition Phrase", - Category: "word", - Description: "Phrase starting with a preposition, showing relation between elements in a sentence.", - Example: "out the black thing", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return phrasePreposition(f), nil - }, - }) -} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_preposition.go b/vendor/github.com/brianvoe/gofakeit/v7/word_preposition.go index 17902dc6..55d0246e 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_preposition.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_preposition.go @@ -52,6 +52,8 @@ func 
addWordPrepositionLookup() { Description: "Words used to express the relationship of a noun or pronoun to other words in a sentence", Example: "other than", Output: "string", + Aliases: []string{"relationship connector", "grammar link", "sentence bridge", "word connector"}, + Keywords: []string{"relationship", "noun", "pronoun", "sentence", "grammar", "express", "connector", "link", "relational"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return preposition(f), nil }, @@ -63,6 +65,8 @@ func addWordPrepositionLookup() { Description: "Single-word preposition showing relationships between 2 parts of a sentence", Example: "out", Output: "string", + Aliases: []string{"basic connector", "fundamental link", "single element", "grammar bridge"}, + Keywords: []string{"preposition", "simple", "single-word", "relationships", "parts", "sentence", "grammar", "showing", "basic", "fundamental", "elementary"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return prepositionSimple(f), nil }, @@ -74,6 +78,8 @@ func addWordPrepositionLookup() { Description: "Two-word combination preposition, indicating a complex relation", Example: "before", Output: "string", + Aliases: []string{"two-word connector", "complex relation", "combination element", "grammar bridge"}, + Keywords: []string{"preposition", "double", "two-word", "combination", "complex", "relation", "grammar", "indicating", "compound", "multi-word", "paired"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return prepositionDouble(f), nil }, @@ -85,6 +91,8 @@ func addWordPrepositionLookup() { Description: "Preposition that can be formed by combining two or more prepositions", Example: "according to", Output: "string", + Aliases: []string{"multi-part connector", "complex combination", "formed element", "grammar bridge"}, + Keywords: []string{"preposition", "compound", "combining", "two", "more", "prepositions", "grammar", "formed", "complex", "multi-part", 
"constructed"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return prepositionCompound(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_pronoun.go b/vendor/github.com/brianvoe/gofakeit/v7/word_pronoun.go index f511f7d7..78cbd1e6 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_pronoun.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_pronoun.go @@ -107,6 +107,8 @@ func addWordPronounLookup() { Description: "Word used in place of a noun to avoid repetition", Example: "me", Output: "string", + Aliases: []string{"noun substitute", "word replacement", "grammar element", "reference word"}, + Keywords: []string{"noun", "replacement", "grammar", "repetition", "substitute", "reference", "avoidance"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronoun(f), nil }, @@ -118,6 +120,8 @@ func addWordPronounLookup() { Description: "Pronoun referring to a specific persons or things", Example: "it", Output: "string", + Aliases: []string{"personal reference", "specific entity", "individual pronoun", "grammar element"}, + Keywords: []string{"pronoun", "personal", "specific", "person", "thing", "grammar", "referring", "individual", "entity", "identity"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounPersonal(f), nil }, @@ -129,6 +133,8 @@ func addWordPronounLookup() { Description: "Pronoun used as the object of a verb or preposition", Example: "it", Output: "string", + Aliases: []string{"object reference", "verb object", "preposition object", "grammar function"}, + Keywords: []string{"pronoun", "verb", "preposition", "grammar", "used", "objective", "case", "receiver"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounObject(f), nil }, @@ -140,6 +146,8 @@ func addWordPronounLookup() { Description: "Pronoun indicating ownership or belonging", Example: "mine", Output: "string", + Aliases: []string{"ownership indicator", "belonging reference", 
"possession word", "grammar element"}, + Keywords: []string{"pronoun", "possessive", "ownership", "belonging", "grammar", "indicating", "possession", "property"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounPossessive(f), nil }, @@ -151,6 +159,8 @@ func addWordPronounLookup() { Description: "Pronoun referring back to the subject of the sentence", Example: "myself", Output: "string", + Aliases: []string{"self reference", "subject reflection", "backward reference", "grammar element"}, + Keywords: []string{"pronoun", "reflective", "subject", "sentence", "grammar", "referring", "reflexive", "self", "mirror"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounReflective(f), nil }, @@ -162,6 +172,8 @@ func addWordPronounLookup() { Description: "Pronoun that does not refer to a specific person or thing", Example: "few", Output: "string", + Aliases: []string{"vague reference", "general pronoun", "unspecific word", "grammar element"}, + Keywords: []string{"pronoun", "indefinite", "specific", "person", "grammar", "refer", "vague", "general", "unspecified"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounIndefinite(f), nil }, @@ -173,6 +185,8 @@ func addWordPronounLookup() { Description: "Pronoun that points out specific people or things", Example: "this", Output: "string", + Aliases: []string{"pointing reference", "specific indicator", "demonstration word", "grammar element"}, + Keywords: []string{"pronoun", "demonstrative", "specific", "people", "grammar", "points", "indicate", "reference", "pointing"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounDemonstrative(f), nil }, @@ -184,6 +198,8 @@ func addWordPronounLookup() { Description: "Pronoun used to ask questions", Example: "what", Output: "string", + Aliases: []string{"question word", "inquiry reference", "interrogation element", "grammar function"}, + Keywords: []string{"pronoun", 
"interrogative", "question", "ask", "grammar", "used", "inquiry", "wh-word", "questioning"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounInterrogative(f), nil }, @@ -195,6 +211,8 @@ func addWordPronounLookup() { Description: "Pronoun that introduces a clause, referring back to a noun or pronoun", Example: "as", Output: "string", + Aliases: []string{"backward reference", "linking pronoun", "grammar element"}, + Keywords: []string{"pronoun", "relative", "clause", "noun", "grammar", "introduces", "referring", "connector", "link"}, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return pronounRelative(f), nil }, diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_sentence.go b/vendor/github.com/brianvoe/gofakeit/v7/word_sentence.go deleted file mode 100644 index 1cbc874e..00000000 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_sentence.go +++ /dev/null @@ -1,212 +0,0 @@ -package gofakeit - -import ( - "bytes" - "errors" - "strings" - "unicode" -) - -type paragrapOptions struct { - paragraphCount int - sentenceCount int - wordCount int - separator string -} - -const bytesPerWordEstimation = 6 - -type sentenceGenerator func(f *Faker, wordCount int) string -type wordGenerator func(f *Faker) string - -// Sentence will generate a random sentence -func Sentence(wordCount int) string { return sentence(GlobalFaker, wordCount) } - -// Sentence will generate a random sentence -func (f *Faker) Sentence(wordCount int) string { return sentence(f, wordCount) } - -func sentence(f *Faker, wordCount int) string { - return sentenceGen(f, wordCount, word) -} - -// Paragraph will generate a random paragraphGenerator -func Paragraph(paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return paragraph(GlobalFaker, paragraphCount, sentenceCount, wordCount, separator) -} - -// Paragraph will generate a random paragraphGenerator -func (f *Faker) Paragraph(paragraphCount int, sentenceCount int, 
wordCount int, separator string) string { - return paragraph(f, paragraphCount, sentenceCount, wordCount, separator) -} - -func paragraph(f *Faker, paragraphCount int, sentenceCount int, wordCount int, separator string) string { - return paragraphGen(f, paragrapOptions{paragraphCount, sentenceCount, wordCount, separator}, sentence) -} - -func sentenceGen(f *Faker, wordCount int, word wordGenerator) string { - if wordCount <= 0 { - return "" - } - - wordSeparator := ' ' - sentence := bytes.Buffer{} - sentence.Grow(wordCount * bytesPerWordEstimation) - - for i := 0; i < wordCount; i++ { - word := word(f) - if i == 0 { - runes := []rune(word) - runes[0] = unicode.ToTitle(runes[0]) - word = string(runes) - } - sentence.WriteString(word) - if i < wordCount-1 { - sentence.WriteRune(wordSeparator) - } - } - sentence.WriteRune('.') - return sentence.String() -} - -func paragraphGen(f *Faker, opts paragrapOptions, sentecer sentenceGenerator) string { - if opts.paragraphCount <= 0 || opts.sentenceCount <= 0 || opts.wordCount <= 0 { - return "" - } - - //to avoid making Go 1.10 dependency, we cannot use strings.Builder - paragraphs := bytes.Buffer{} - //we presume the length - paragraphs.Grow(opts.paragraphCount * opts.sentenceCount * opts.wordCount * bytesPerWordEstimation) - wordSeparator := ' ' - - for i := 0; i < opts.paragraphCount; i++ { - for e := 0; e < opts.sentenceCount; e++ { - paragraphs.WriteString(sentecer(f, opts.wordCount)) - if e < opts.sentenceCount-1 { - paragraphs.WriteRune(wordSeparator) - } - } - - if i < opts.paragraphCount-1 { - paragraphs.WriteString(opts.separator) - } - } - - return paragraphs.String() -} - -// Question will return a random question -func Question() string { - return question(GlobalFaker) -} - -// Question will return a random question -func (f *Faker) Question() string { - return question(f) -} - -func question(f *Faker) string { - return strings.Replace(hipsterSentence(f, number(f, 3, 10)), ".", "?", 1) -} - -// Quote will return 
a random quote from a random person -func Quote() string { return quote(GlobalFaker) } - -// Quote will return a random quote from a random person -func (f *Faker) Quote() string { return quote(f) } - -func quote(f *Faker) string { - return `"` + hipsterSentence(f, number(f, 3, 10)) + `" - ` + firstName(f) + " " + lastName(f) -} - -func addWordSentenceLookup() { - AddFuncLookup("sentence", Info{ - Display: "Sentence", - Category: "word", - Description: "Set of words expressing a statement, question, exclamation, or command", - Example: "Interpret context record river mind.", - Output: "string", - Params: []Param{ - {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, - }, - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - wordCount, err := info.GetInt(m, "wordcount") - if err != nil { - return nil, err - } - if wordCount <= 0 || wordCount > 50 { - return nil, errors.New("invalid word count, must be greater than 0, less than 50") - } - - return sentence(f, wordCount), nil - }, - }) - - AddFuncLookup("paragraph", Info{ - Display: "Paragraph", - Category: "word", - Description: "Distinct section of writing covering a single theme, composed of multiple sentences", - Example: "Interpret context record river mind press self should compare property outcome divide. Combine approach sustain consult discover explanation direct address church husband seek army. Begin own act welfare replace press suspect stay link place manchester specialist. Arrive price satisfy sign force application hair train provide basis right pay. 
Close mark teacher strengthen information attempt head touch aim iron tv take.", - Output: "string", - Params: []Param{ - {Field: "paragraphcount", Display: "Paragraph Count", Type: "int", Default: "2", Description: "Number of paragraphs"}, - {Field: "sentencecount", Display: "Sentence Count", Type: "int", Default: "2", Description: "Number of sentences in a paragraph"}, - {Field: "wordcount", Display: "Word Count", Type: "int", Default: "5", Description: "Number of words in a sentence"}, - {Field: "paragraphseparator", Display: "Paragraph Separator", Type: "string", Default: "
", Description: "String value to add between paragraphs"}, - }, - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - paragraphCount, err := info.GetInt(m, "paragraphcount") - if err != nil { - return nil, err - } - if paragraphCount <= 0 || paragraphCount > 20 { - return nil, errors.New("invalid paragraph count, must be greater than 0, less than 20") - } - - sentenceCount, err := info.GetInt(m, "sentencecount") - if err != nil { - return nil, err - } - if sentenceCount <= 0 || sentenceCount > 20 { - return nil, errors.New("invalid sentence count, must be greater than 0, less than 20") - } - - wordCount, err := info.GetInt(m, "wordcount") - if err != nil { - return nil, err - } - if wordCount <= 0 || wordCount > 50 { - return nil, errors.New("invalid word count, must be greater than 0, less than 50") - } - - paragraphSeparator, err := info.GetString(m, "paragraphseparator") - if err != nil { - return nil, err - } - - return paragraph(f, paragraphCount, sentenceCount, wordCount, paragraphSeparator), nil - }, - }) - - AddFuncLookup("question", Info{ - Display: "Question", - Category: "word", - Description: "Statement formulated to inquire or seek clarification", - Example: "Roof chia echo?", - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return question(f), nil - }, - }) - - AddFuncLookup("quote", Info{ - Display: "Quote", - Category: "word", - Description: "Direct repetition of someone else's words", - Example: `"Roof chia echo." 
- Lura Lockman`, - Output: "string", - Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { - return quote(f), nil - }, - }) -} diff --git a/vendor/github.com/brianvoe/gofakeit/v7/word_verb.go b/vendor/github.com/brianvoe/gofakeit/v7/word_verb.go index d268784b..6d24efa1 100644 --- a/vendor/github.com/brianvoe/gofakeit/v7/word_verb.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/word_verb.go @@ -64,6 +64,12 @@ func addWordVerbLookup() { Description: "Word expressing an action, event or state", Example: "release", Output: "string", + Aliases: []string{ + "action word", "doing word", "predicate word", "verb form", "process word", + }, + Keywords: []string{ + "movement", "change", "existence", "process", "condition", "happening", "expression", "statement", "activity", "function", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return verb(f), nil }, @@ -72,9 +78,15 @@ func addWordVerbLookup() { AddFuncLookup("verbaction", Info{ Display: "Action Verb", Category: "word", - Description: "Verb Indicating a physical or mental action", + Description: "Verb indicating a physical or mental action", Example: "close", Output: "string", + Aliases: []string{ + "movement word", "doing action", "behavior word", "mental action", "physical action", + }, + Keywords: []string{ + "activity", "task", "operation", "motion", "effort", "performance", "gesture", "response", "execution", "behavior", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return verbAction(f), nil }, @@ -86,6 +98,12 @@ func addWordVerbLookup() { Description: "Verb that requires a direct object to complete its meaning", Example: "follow", Output: "string", + Aliases: []string{ + "object verb", "requires object", "dependent verb", "object-linked", "receiver word", + }, + Keywords: []string{ + "direct", "receiver", "transfer", "target", "completion", "relation", "dependent", "object-based", "action-transfer", "link", + }, Generate: func(f *Faker, m *MapParams, 
info *Info) (any, error) { return verbTransitive(f), nil }, @@ -97,6 +115,12 @@ func addWordVerbLookup() { Description: "Verb that does not require a direct object to complete its meaning", Example: "laugh", Output: "string", + Aliases: []string{ + "standalone verb", "independent word", "no object verb", "complete action", "self-contained verb", + }, + Keywords: []string{ + "autonomous", "independent", "non-transfer", "self-complete", "expression", "state", "behavior", "occur", "perform", "action-only", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return verbIntransitive(f), nil }, @@ -105,9 +129,15 @@ func addWordVerbLookup() { AddFuncLookup("verblinking", Info{ Display: "Linking Verb", Category: "word", - Description: "Verb that Connects the subject of a sentence to a subject complement", + Description: "Verb that connects the subject of a sentence to a subject complement", Example: "was", Output: "string", + Aliases: []string{ + "connecting verb", "copular verb", "bridge word", "link word", "equating verb", + }, + Keywords: []string{ + "relation", "connection", "equivalence", "identification", "state", "being", "subject-link", "copula", "connector", "description", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return verbLinking(f), nil }, @@ -119,8 +149,16 @@ func addWordVerbLookup() { Description: "Auxiliary verb that helps the main verb complete the sentence", Example: "be", Output: "string", + Aliases: []string{ + "auxiliary verb", "supporting verb", "assisting word", "helper verb", "modal verb", + }, + Keywords: []string{ + "tense", "mood", "voice", "aspect", "support", "structure", + "compound", "formation", "assistance", + }, Generate: func(f *Faker, m *MapParams, info *Info) (any, error) { return verbHelping(f), nil }, }) + } diff --git a/vendor/github.com/brianvoe/gofakeit/v7/xml.go b/vendor/github.com/brianvoe/gofakeit/v7/xml.go index da467037..ad36a8c9 100644 --- 
a/vendor/github.com/brianvoe/gofakeit/v7/xml.go +++ b/vendor/github.com/brianvoe/gofakeit/v7/xml.go @@ -149,7 +149,7 @@ func xmlFunc(f *Faker, xo *XMLOptions) ([]byte, error) { } // Check fields length - if xo.Fields == nil || len(xo.Fields) <= 0 { + if len(xo.Fields) <= 0 { return nil, errors.New("must pass fields in order to build json object(s)") } @@ -284,6 +284,17 @@ func addFileXMLLookup() { `, Output: "[]byte", ContentType: "application/xml", + Aliases: []string{ + "xml document", + "extensible markup", + "tagged data", + "hierarchical structure", + "serialized tree", + }, + Keywords: []string{ + "extensible", "markup", "language", "elements", "format", + "structured", "generates", "tags", "attributes", "nested", + }, Params: []Param{ {Field: "type", Display: "Type", Type: "string", Default: "single", Options: []string{"single", "array"}, Description: "Type of XML, single or array"}, {Field: "rootelement", Display: "Root Element", Type: "string", Default: "xml", Description: "Root element wrapper name"}, diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b7..33c88305 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c..78bddf1c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. 
// // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a4..78f95f25 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
// //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bb..118e49e8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5f..05f5e7df 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd..cf9d42ae 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. 
func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index d6d23b3d..ad1abd49 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -26,6 +26,7 @@ Akhil Mohan Akihiro Suda Akim Demaille Alan Thompson +Alano Terblanche Albert Callarisa Alberto Roura Albin Kerouanton @@ -65,6 +66,7 @@ Andrew Hsu Andrew Macpherson Andrew McDonnell Andrew Po +Andrew-Zipperer Andrey Petrov Andrii Berehuliak André Martins @@ -124,11 +126,13 @@ Bryan Bess Bryan Boreham Bryan Murphy bryfry +Calvin Liu Cameron Spear Cao Weiwei Carlo Mion Carlos Alexandro Becker Carlos de Paula +Casey Korver Ce Gao Cedric Davies Cezar Sa Espinola @@ -160,6 +164,8 @@ Christophe Vidal Christopher Biscardi Christopher Crone Christopher Jones +Christopher Petito <47751006+krissetto@users.noreply.github.com> +Christopher Petito Christopher Svensson Christy Norman Chun Chen @@ -212,6 +218,7 @@ David Cramer David Dooling David Gageot David Karlsson +David le Blanc David Lechner David Scott David Sheets @@ -298,6 +305,7 @@ Gang Qiao Gary Schaetz Genki Takiuchi George MacRorie +George Margaritis George Xie Gianluca Borello Gildas Cuisinier @@ -306,6 +314,7 @@ Gleb Stsenov Goksu Toprak Gou Rao Govind Rai +Grace Choi Graeme Wiebe Grant Reaber Greg Pflaum @@ -386,6 +395,7 @@ Jezeniel Zapanta Jian Zhang Jie Luo Jilles Oldenbeuving +Jim Chen Jim Galasyn Jim Lin Jimmy Leger @@ -416,6 +426,7 @@ John Willis Jon Johnson Jon Zeolla Jonatas Baldin +Jonathan A. Sternberg Jonathan Boulle Jonathan Lee Jonathan Lomas @@ -470,6 +481,7 @@ Kevin Woblick khaled souf Kim Eik Kir Kolyshkin +Kirill A. 
Korinsky Kotaro Yoshimatsu Krasi Georgiev Kris-Mikael Krister @@ -530,6 +542,7 @@ Marco Vedovati Marcus Martins Marianna Tessel Marius Ileana +Marius Meschter Marius Sturm Mark Oates Marsh Macy @@ -538,6 +551,7 @@ Mary Anthony Mason Fish Mason Malone Mateusz Major +Mathias Duedahl <64321057+Lussebullen@users.noreply.github.com> Mathieu Champlon Mathieu Rollet Matt Gucci @@ -547,6 +561,7 @@ Matthew Heon Matthieu Hauglustaine Mauro Porras P Max Shytikov +Max-Julian Pogner Maxime Petazzoni Maximillian Fan Xavier Mei ChunTao @@ -610,6 +625,7 @@ Nathan McCauley Neil Peterson Nick Adcock Nick Santos +Nick Sieger Nico Stapelbroek Nicola Kabar Nicolas Borboën @@ -704,6 +720,7 @@ Rory Hunter Ross Boucher Rubens Figueiredo Rui Cao +Rui JingAn Ryan Belgrave Ryan Detzel Ryan Stelly @@ -797,6 +814,7 @@ Tim Hockin Tim Sampson Tim Smith Tim Waugh +Tim Welsh Tim Wraight timfeirg Timothy Hobbs @@ -880,9 +898,11 @@ Zhang Wei Zhang Wentao ZhangHang zhenghenghuo +Zhiwei Liang Zhou Hao Zhoulin Xie Zhu Guihua +Zhuo Zhi Álex González Álvaro Lázaro Átila Camurça Alves diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE index 58b19b6d..1c40faae 100644 --- a/vendor/github.com/docker/cli/NOTICE +++ b/vendor/github.com/docker/cli/NOTICE @@ -14,6 +14,6 @@ United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. -For more information, please see https://www.bis.doc.gov +For more information, see https://www.bis.doc.gov See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go index 584ade8a..ee11656f 100644 --- a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go +++ b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package interpolation diff --git a/vendor/github.com/docker/cli/cli/compose/loader/full-example.yml b/vendor/github.com/docker/cli/cli/compose/loader/full-example.yml index 76447d20..36ebf833 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/full-example.yml +++ b/vendor/github.com/docker/cli/cli/compose/loader/full-example.yml @@ -1,4 +1,4 @@ -version: "3.12" +version: "3.13" services: foo: @@ -207,6 +207,9 @@ services: aliases: - alias1 - alias3 + driver_opts: + "driveropt1": "optval1" + "driveropt2": "optval2" other-network: ipv4_address: 172.16.238.10 ipv6_address: 2001:3984:3989::10 diff --git a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go index 24682614..82c36d7d 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package loader @@ -29,6 +29,7 @@ var interpolateTypeCastMapping = map[interp.Path]interp.Cast{ servicePath("ulimits", interp.PathMatchAll, "hard"): toInt, servicePath("ulimits", interp.PathMatchAll, "soft"): toInt, servicePath("privileged"): toBoolean, + servicePath("oom_score_adj"): toInt, servicePath("read_only"): toBoolean, servicePath("stdin_open"): toBoolean, 
servicePath("tty"): toBoolean, diff --git a/vendor/github.com/docker/cli/cli/compose/loader/loader.go b/vendor/github.com/docker/cli/cli/compose/loader/loader.go index 84090075..7bc420b2 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/loader.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/loader.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package loader @@ -21,8 +21,8 @@ import ( "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/nat" units "github.com/docker/go-units" + "github.com/go-viper/mapstructure/v2" "github.com/google/shlex" - "github.com/mitchellh/mapstructure" "github.com/pkg/errors" "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" diff --git a/vendor/github.com/docker/cli/cli/compose/loader/merge.go b/vendor/github.com/docker/cli/cli/compose/loader/merge.go index ab33ee61..34455d59 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/merge.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/merge.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package loader diff --git a/vendor/github.com/docker/cli/cli/compose/schema/data/config_schema_v3.13.json b/vendor/github.com/docker/cli/cli/compose/schema/data/config_schema_v3.13.json new file mode 100644 index 00000000..8daa8892 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/compose/schema/data/config_schema_v3.13.json @@ -0,0 +1,680 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.13.json", + "type": "object", + + "properties": { + "version": { + "type": "string", + "default": "3.13" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": 
"#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + }, + + "secrets": { + "id": "#/properties/secrets", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + }, + + "configs": { + "id": "#/properties/configs", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/config" + } + }, + "additionalProperties": false + } + }, + + "patternProperties": {"^x-": {}}, + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "cache_from": {"$ref": "#/definitions/list_of_strings"}, + "network": {"type": "string"}, + "target": {"type": "string"}, + "shm_size": {"type": ["integer", "string"]}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": true + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroupns_mode": {"type": "string"}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "configs": { + "type": "array", + 
"items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + } + } + ] + } + }, + "container_name": {"type": "string"}, + "credential_spec": { + "type": "object", + "properties": { + "config": {"type": "string"}, + "file": {"type": "string"}, + "registry": {"type": "string"} + }, + "additionalProperties": false + }, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "init": {"type": "boolean"}, + "ipc": {"type": "string"}, + "isolation": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + 
{"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": { "type": ["string", "number"] } + } + }, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "oneOf": [ + {"type": "number", "format": "ports"}, + {"type": "string", "format": "ports"}, + { + "type": "object", + "properties": { + "mode": {"type": "string"}, + "target": {"type": "integer"}, + "published": {"type": "integer"}, + "protocol": {"type": "string"} + }, + "additionalProperties": false + } + ] + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "secrets": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + } + } + ] + } + }, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + 
}, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "oom_score_adj": {"type": "integer"}, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "required": ["type"], + "properties": { + "type": {"type": "string"}, + "source": {"type": "string"}, + "target": {"type": "string"}, + "read_only": {"type": "boolean"}, + "consistency": {"type": "string"}, + "bind": { + "type": "object", + "properties": { + "propagation": {"type": "string"} + } + }, + "volume": { + "type": "object", + "properties": { + "nocopy": {"type": "boolean"} + } + }, + "tmpfs": { + "type": "object", + "properties": { + "size": { + "type": "integer", + "minimum": 0 + } + } + } + }, + "additionalProperties": false + } + ], + "uniqueItems": true + } + }, + "working_dir": {"type": "string"} + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string", "format": "duration"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string", "format": "duration"}, + "start_period": {"type": "string", "format": "duration"}, + "start_interval": {"type": "string", "format": "duration"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "endpoint_mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "rollback_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + 
"monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"}, + "order": {"type": "string", "enum": [ + "start-first", "stop-first" + ]} + }, + "additionalProperties": false + }, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"}, + "order": {"type": "string", "enum": [ + "start-first", "stop-first" + ]} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"}, + "pids": {"type": "integer"} + }, + "additionalProperties": false + }, + "reservations": { + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"}, + "generic_resources": {"$ref": "#/definitions/generic_resources"} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}}, + "preferences": { + "type": "array", + "items": { + "type": "object", + "properties": { + "spread": {"type": "string"} + }, + "additionalProperties": false + } + }, + "max_replicas_per_node": {"type": "integer"} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "generic_resources": { + "id": "#/definitions/generic_resources", + "type": "array", + "items": { + "type": "object", + "properties": { + "discrete_resource_spec": { + 
"type": "object", + "properties": { + "kind": {"type": "string"}, + "value": {"type": "number"} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "name": {"type": "string"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "attachable": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "name": {"type": "string"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "x-cluster-spec": { + "type": "object", + "properties": { + "group": {"type": "string"}, + "access_mode": { + "type": "object", + "properties": { + "scope": {"type": "string"}, + "sharing": {"type": "string"}, + "block_volume": {"type": "object"}, + "mount_volume": { + "type": "object", + "properties": { + "fs_type": {"type": "string"}, + "mount_flags": {"type": "array", "items": {"type": "string"}} + } + } + } + }, + "accessibility_requirements": { + 
"type": "object", + "properties": { + "requisite": { + "type": "array", + "items": { + "type": "object", + "properties": { + "segments": {"$ref": "#/definitions/list_or_dict"} + } + } + }, + "preferred": { + "type": "array", + "items": { + "type": "object", + "properties": { + "segments": {"$ref": "#/definitions/list_or_dict"} + } + } + } + } + }, + "capacity_range": { + "type": "object", + "properties": { + "required_bytes": {"type": "string"}, + "limit_bytes": {"type": "string"} + } + }, + "availability": {"type": "string"} + } + } + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "secret": { + "id": "#/definitions/secret", + "type": "object", + "properties": { + "name": {"type": "string"}, + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "template_driver": {"type": "string"} + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "config": { + "id": "#/definitions/config", + "type": "object", + "properties": { + "name": {"type": "string"}, + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "template_driver": {"type": "string"} + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, 
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/docker/cli/cli/compose/schema/schema.go b/vendor/github.com/docker/cli/cli/compose/schema/schema.go index 3ef704af..2ef1245b 100644 --- a/vendor/github.com/docker/cli/cli/compose/schema/schema.go +++ b/vendor/github.com/docker/cli/cli/compose/schema/schema.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package schema @@ -14,7 +14,7 @@ import ( ) const ( - defaultVersion = "3.12" + defaultVersion = "3.13" versionField = "version" ) @@ -43,7 +43,7 @@ func init() { } // Version returns the version of the config, defaulting to the latest "3.x" -// version (3.12). If only the major version "3" is specified, it is used as +// version (3.13). If only the major version "3" is specified, it is used as // version "3.x" and returns the default version (latest 3.x). 
func Version(config map[string]any) string { version, ok := config[versionField] @@ -102,7 +102,7 @@ func getDescription(err validationError) string { switch err.parent.Type() { case "invalid_type": if expectedType, ok := err.parent.Details()["expected"].(string); ok { - return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) + return "must be a " + humanReadableType(expectedType) } case jsonschemaOneOf, jsonschemaAnyOf: if err.child == nil { diff --git a/vendor/github.com/docker/cli/cli/compose/template/template.go b/vendor/github.com/docker/cli/cli/compose/template/template.go index dd3acaf2..1507c0ee 100644 --- a/vendor/github.com/docker/cli/cli/compose/template/template.go +++ b/vendor/github.com/docker/cli/cli/compose/template/template.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package template @@ -48,7 +48,7 @@ type Mapping func(string) (string, bool) // the substitution and an error. type SubstituteFunc func(string, Mapping) (string, bool, error) -// SubstituteWith subsitute variables in the string with their values. +// SubstituteWith substitutes variables in the string with their values. // It accepts additional substitute function. 
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { var err error diff --git a/vendor/github.com/docker/cli/cli/compose/types/types.go b/vendor/github.com/docker/cli/cli/compose/types/types.go index f6954ff7..55b80365 100644 --- a/vendor/github.com/docker/cli/cli/compose/types/types.go +++ b/vendor/github.com/docker/cli/cli/compose/types/types.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 +//go:build go1.22 package types @@ -207,6 +207,7 @@ type ServiceConfig struct { Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"` Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"` User string `yaml:",omitempty" json:"user,omitempty"` + OomScoreAdj int64 `yaml:",omitempty" json:"oom_score_adj,omitempty"` UserNSMode string `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"` WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"` @@ -374,9 +375,10 @@ type PlacementPreferences struct { // ServiceNetworkConfig is the network configuration for a service type ServiceNetworkConfig struct { - Aliases []string `yaml:",omitempty" json:"aliases,omitempty"` - Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` - Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` + Aliases []string `yaml:",omitempty" json:"aliases,omitempty"` + DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` + Ipv6Address string 
`mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` } // ServicePortConfig is the port configuration for a service diff --git a/vendor/github.com/docker/cli/opts/config.go b/vendor/github.com/docker/cli/opts/config.go index 3be0fa93..1423ae3b 100644 --- a/vendor/github.com/docker/cli/opts/config.go +++ b/vendor/github.com/docker/cli/opts/config.go @@ -2,6 +2,7 @@ package opts import ( "encoding/csv" + "errors" "fmt" "os" "strconv" @@ -68,7 +69,7 @@ func (o *ConfigOpt) Set(value string) error { } if options.ConfigName == "" { - return fmt.Errorf("source is required") + return errors.New("source is required") } if options.File.Name == "" { options.File.Name = options.ConfigName diff --git a/vendor/github.com/docker/cli/opts/envfile.go b/vendor/github.com/docker/cli/opts/envfile.go index 26aa3c3a..3a16e6c1 100644 --- a/vendor/github.com/docker/cli/opts/envfile.go +++ b/vendor/github.com/docker/cli/opts/envfile.go @@ -2,6 +2,8 @@ package opts import ( "os" + + "github.com/docker/cli/pkg/kvfile" ) // ParseEnvFile reads a file with environment variables enumerated by lines @@ -18,5 +20,5 @@ import ( // environment variables, that's why we just strip leading whitespace and // nothing more. 
func ParseEnvFile(filename string) ([]string, error) { - return parseKeyValueFile(filename, os.LookupEnv) + return kvfile.Parse(filename, os.LookupEnv) } diff --git a/vendor/github.com/docker/cli/opts/file.go b/vendor/github.com/docker/cli/opts/file.go deleted file mode 100644 index 72b90e11..00000000 --- a/vendor/github.com/docker/cli/opts/file.go +++ /dev/null @@ -1,76 +0,0 @@ -package opts - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unicode" - "unicode/utf8" -) - -const whiteSpaces = " \t" - -// ErrBadKey typed error for bad environment variable -type ErrBadKey struct { - msg string -} - -func (e ErrBadKey) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} - -func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - currentLine := 0 - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - if !utf8.Valid(scannedBytes) { - return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) - } - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - // trim the line from all leading whitespace first - line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - variable, value, hasValue := strings.Cut(line, "=") - - // trim the front of a variable, but nothing else - variable = strings.TrimLeft(variable, whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)} - } - if len(variable) == 0 { - return []string{}, ErrBadKey{fmt.Sprintf("no variable name 
on line '%s'", line)} - } - - if hasValue { - // pass the value through, no trimming - lines = append(lines, variable+"="+value) - } else { - var present bool - if emptyFn != nil { - value, present = emptyFn(line) - } - if present { - // if only a pass-through variable is given, clean it up. - lines = append(lines, strings.TrimSpace(variable)+"="+value) - } - } - } - } - return lines, scanner.Err() -} diff --git a/vendor/github.com/docker/cli/opts/mount.go b/vendor/github.com/docker/cli/opts/mount.go index 430b858e..3a4ee31a 100644 --- a/vendor/github.com/docker/cli/opts/mount.go +++ b/vendor/github.com/docker/cli/opts/mount.go @@ -165,11 +165,11 @@ func (m *MountOpt) Set(value string) error { } if mount.Type == "" { - return fmt.Errorf("type is required") + return errors.New("type is required") } if mount.Target == "" { - return fmt.Errorf("target is required") + return errors.New("target is required") } if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { diff --git a/vendor/github.com/docker/cli/opts/network.go b/vendor/github.com/docker/cli/opts/network.go index e36ef405..413aec7b 100644 --- a/vendor/github.com/docker/cli/opts/network.go +++ b/vendor/github.com/docker/cli/opts/network.go @@ -2,6 +2,7 @@ package opts import ( "encoding/csv" + "errors" "fmt" "regexp" "strings" @@ -83,11 +84,11 @@ func (n *NetworkOpt) Set(value string) error { //nolint:gocyclo } netOpt.DriverOpts[key] = val default: - return fmt.Errorf("invalid field key %s", key) + return errors.New("invalid field key " + key) } } if len(netOpt.Target) == 0 { - return fmt.Errorf("network name/id is not specified") + return errors.New("network name/id is not specified") } } else { netOpt.Target = value @@ -126,7 +127,7 @@ func parseDriverOpt(driverOpt string) (string, string, error) { // TODO(thaJeztah): should value be converted to lowercase as well, or only the key? 
key, value, ok := strings.Cut(strings.ToLower(driverOpt), "=") if !ok || key == "" { - return "", "", fmt.Errorf("invalid key value pair format in driver options") + return "", "", errors.New("invalid key value pair format in driver options") } key = strings.TrimSpace(key) value = strings.TrimSpace(value) diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go index 80de1605..157b30f3 100644 --- a/vendor/github.com/docker/cli/opts/opts.go +++ b/vendor/github.com/docker/cli/opts/opts.go @@ -266,6 +266,8 @@ func validateDomain(val string) (string, error) { return "", fmt.Errorf("%s is not a valid domain", val) } +const whiteSpaces = " \t" + // ValidateLabel validates that the specified string is a valid label, and returns it. // // Labels are in the form of key=value; key must be a non-empty string, and not @@ -401,7 +403,7 @@ func ParseCPUs(value string) (int64, error) { } nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") + return 0, errors.New("value is too precise") } return nano.Num().Int64(), nil } @@ -409,14 +411,14 @@ func ParseCPUs(value string) (int64, error) { // ParseLink parses and validates the specified string as a link format (name:alias) func ParseLink(val string) (string, string, error) { if val == "" { - return "", "", fmt.Errorf("empty string specified for links") + return "", "", errors.New("empty string specified for links") } // We expect two parts, but restrict to three to allow detecting invalid formats. arr := strings.SplitN(val, ":", 3) // TODO(thaJeztah): clean up this logic!! if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) + return "", "", errors.New("bad format for links: " + val) } // TODO(thaJeztah): this should trim the "/" prefix as well?? 
if len(arr) == 1 { diff --git a/vendor/github.com/docker/cli/opts/parse.go b/vendor/github.com/docker/cli/opts/parse.go index 381648fe..996d4d7e 100644 --- a/vendor/github.com/docker/cli/opts/parse.go +++ b/vendor/github.com/docker/cli/opts/parse.go @@ -1,11 +1,12 @@ package opts import ( - "fmt" + "errors" "os" "strconv" "strings" + "github.com/docker/cli/pkg/kvfile" "github.com/docker/docker/api/types/container" ) @@ -25,7 +26,7 @@ func ReadKVEnvStrings(files []string, override []string) ([]string, error) { func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) { var variables []string for _, ef := range files { - parsedVars, err := parseKeyValueFile(ef, emptyFn) + parsedVars, err := kvfile.Parse(ef, emptyFn) if err != nil { return nil, err } @@ -81,12 +82,12 @@ func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { p := container.RestartPolicy{} k, v, ok := strings.Cut(policy, ":") if ok && k == "" { - return container.RestartPolicy{}, fmt.Errorf("invalid restart policy format: no policy provided before colon") + return container.RestartPolicy{}, errors.New("invalid restart policy format: no policy provided before colon") } if v != "" { count, err := strconv.Atoi(v) if err != nil { - return container.RestartPolicy{}, fmt.Errorf("invalid restart policy format: maximum retry count must be an integer") + return container.RestartPolicy{}, errors.New("invalid restart policy format: maximum retry count must be an integer") } p.MaximumRetryCount = count } diff --git a/vendor/github.com/docker/cli/opts/port.go b/vendor/github.com/docker/cli/opts/port.go index fe41cdd2..099aae35 100644 --- a/vendor/github.com/docker/cli/opts/port.go +++ b/vendor/github.com/docker/cli/opts/port.go @@ -2,6 +2,7 @@ package opts import ( "encoding/csv" + "errors" "fmt" "net" "regexp" @@ -102,7 +103,7 @@ func (p *PortOpt) Set(value string) error { for _, portBindings := range portBindingMap { for _, portBinding := range 
portBindings { if portBinding.HostIP != "" { - return fmt.Errorf("hostip is not supported") + return errors.New("hostip is not supported") } } } @@ -148,6 +149,7 @@ func ConvertPortToPortConfig( for _, binding := range portBindings[port] { if p := net.ParseIP(binding.HostIP); p != nil && !p.IsUnspecified() { + // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). logrus.Warnf("ignoring IP-address (%s:%s) service will listen on '0.0.0.0'", net.JoinHostPort(binding.HostIP, binding.HostPort), port) } diff --git a/vendor/github.com/docker/cli/opts/secret.go b/vendor/github.com/docker/cli/opts/secret.go index 750dbe4f..09d2b2b3 100644 --- a/vendor/github.com/docker/cli/opts/secret.go +++ b/vendor/github.com/docker/cli/opts/secret.go @@ -2,6 +2,7 @@ package opts import ( "encoding/csv" + "errors" "fmt" "os" "strconv" @@ -62,12 +63,12 @@ func (o *SecretOpt) Set(value string) error { options.File.Mode = os.FileMode(m) default: - return fmt.Errorf("invalid field in secret request: %s", key) + return errors.New("invalid field in secret request: " + key) } } if options.SecretName == "" { - return fmt.Errorf("source is required") + return errors.New("source is required") } if options.File.Name == "" { options.File.Name = options.SecretName diff --git a/vendor/github.com/docker/cli/opts/throttledevice.go b/vendor/github.com/docker/cli/opts/throttledevice.go index bdf454eb..8bf12880 100644 --- a/vendor/github.com/docker/cli/opts/throttledevice.go +++ b/vendor/github.com/docker/cli/opts/throttledevice.go @@ -94,7 +94,7 @@ func (opt *ThrottledeviceOpt) String() string { // GetList returns a slice of pointers to ThrottleDevices. 
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - out := make([]*blkiodev.ThrottleDevice, 0, len(opt.values)) + out := make([]*blkiodev.ThrottleDevice, len(opt.values)) copy(out, opt.values) return out } diff --git a/vendor/github.com/docker/cli/opts/ulimit.go b/vendor/github.com/docker/cli/opts/ulimit.go index 5176b999..1409a109 100644 --- a/vendor/github.com/docker/cli/opts/ulimit.go +++ b/vendor/github.com/docker/cli/opts/ulimit.go @@ -4,24 +4,27 @@ import ( "fmt" "sort" + "github.com/docker/docker/api/types/container" "github.com/docker/go-units" ) // UlimitOpt defines a map of Ulimits type UlimitOpt struct { - values *map[string]*units.Ulimit + values *map[string]*container.Ulimit } // NewUlimitOpt creates a new UlimitOpt. Ulimits are not validated. -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { +func NewUlimitOpt(ref *map[string]*container.Ulimit) *UlimitOpt { + // TODO(thaJeztah): why do we need a map with pointers here? if ref == nil { - ref = &map[string]*units.Ulimit{} + ref = &map[string]*container.Ulimit{} } return &UlimitOpt{ref} } // Set validates a Ulimit and sets its name as a key in UlimitOpt func (o *UlimitOpt) Set(val string) error { + // FIXME(thaJeztah): these functions also need to be moved over from go-units. l, err := units.ParseUlimit(val) if err != nil { return err @@ -43,8 +46,8 @@ func (o *UlimitOpt) String() string { } // GetList returns a slice of pointers to Ulimits. Values are sorted by name. 
-func (o *UlimitOpt) GetList() []*units.Ulimit { - ulimits := make([]*units.Ulimit, 0, len(*o.values)) +func (o *UlimitOpt) GetList() []*container.Ulimit { + ulimits := make([]*container.Ulimit, 0, len(*o.values)) for _, v := range *o.values { ulimits = append(ulimits, v) } diff --git a/vendor/github.com/docker/cli/pkg/kvfile/kvfile.go b/vendor/github.com/docker/cli/pkg/kvfile/kvfile.go new file mode 100644 index 00000000..f6ac8ef4 --- /dev/null +++ b/vendor/github.com/docker/cli/pkg/kvfile/kvfile.go @@ -0,0 +1,130 @@ +// Package kvfile provides utilities to parse line-delimited key/value files +// such as used for label-files and env-files. +// +// # File format +// +// key/value files use the following syntax: +// +// - File must be valid UTF-8. +// - BOM headers are removed. +// - Leading whitespace is removed for each line. +// - Lines starting with "#" are ignored. +// - Empty lines are ignored. +// - Key/Value pairs are provided as "KEY[=]". +// - Maximum line-length is limited to [bufio.MaxScanTokenSize]. +// +// # Interpolation, substitution, and escaping +// +// Both keys and values are used as-is; no interpolation, substitution or +// escaping is supported, and quotes are considered part of the key or value. +// Whitespace in values (including leading and trailing) is preserved. Given +// that the file format is line-delimited, neither key, nor value, can contain +// newlines. +// +// # Key/Value pairs +// +// Key/Value pairs take the following format: +// +// KEY[=] +// +// KEY is required and may not contain whitespaces or NUL characters. Any +// other character (except for the "=" delimiter) are accepted, but it is +// recommended to use a subset of the POSIX portable character set, as +// outlined in [Environment Variables]. +// +// VALUE is optional, but may be empty. 
If no value is provided (i.e., no +// equal sign ("=") is present), the KEY is omitted in the result, but some +// functions accept a lookup-function to provide a default value for the +// given key. +// +// [Environment Variables]: https://pubs.opengroup.org/onlinepubs/7908799/xbd/envvar.html +package kvfile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "strings" + "unicode" + "unicode/utf8" +) + +// Parse parses a line-delimited key/value pairs separated by equal sign. +// It accepts a lookupFn to lookup default values for keys that do not define +// a value. An error is produced if parsing failed, the content contains invalid +// UTF-8 characters, or a key contains whitespaces. +func Parse(filename string, lookupFn func(key string) (value string, found bool)) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + out, err := parseKeyValueFile(fh, lookupFn) + _ = fh.Close() + if err != nil { + return []string{}, fmt.Errorf("invalid env file (%s): %v", filename, err) + } + return out, nil +} + +// ParseFromReader parses a line-delimited key/value pairs separated by equal sign. +// It accepts a lookupFn to lookup default values for keys that do not define +// a value. An error is produced if parsing failed, the content contains invalid +// UTF-8 characters, or a key contains whitespaces. 
+func ParseFromReader(r io.Reader, lookupFn func(key string) (value string, found bool)) ([]string, error) { + return parseKeyValueFile(r, lookupFn) +} + +const whiteSpaces = " \t" + +func parseKeyValueFile(r io.Reader, lookupFn func(string) (string, bool)) ([]string, error) { + lines := []string{} + scanner := bufio.NewScanner(r) + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for currentLine := 1; scanner.Scan(); currentLine++ { + scannedBytes := scanner.Bytes() + if !utf8.Valid(scannedBytes) { + return []string{}, fmt.Errorf("invalid utf8 bytes at line %d: %v", currentLine, scannedBytes) + } + // We trim UTF8 BOM + if currentLine == 1 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + // trim the line from all leading whitespace first. trailing whitespace + // is part of the value, and is kept unmodified. + line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + + if len(line) == 0 || line[0] == '#' { + // skip empty lines and comments (lines starting with '#') + continue + } + + key, _, hasValue := strings.Cut(line, "=") + if len(key) == 0 { + return []string{}, fmt.Errorf("no variable name on line '%s'", line) + } + + // leading whitespace was already removed from the line, but + // variables are not allowed to contain whitespace or have + // trailing whitespace. + if strings.ContainsAny(key, whiteSpaces) { + return []string{}, fmt.Errorf("variable '%s' contains whitespaces", key) + } + + if hasValue { + // key/value pair is valid and has a value; add the line as-is. + lines = append(lines, line) + continue + } + + if lookupFn != nil { + // No value given; try to look up the value. The value may be + // empty but if no value is found, the key is omitted. 
+ if value, found := lookupFn(line); found { + lines = append(lines, key+"="+value) + } + } + } + return lines, scanner.Err() +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml deleted file mode 100644 index ffc7b992..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ /dev/null @@ -1,13 +0,0 @@ -freebsd_task: - name: 'FreeBSD' - freebsd_instance: - image_family: freebsd-13-2 - install_script: - - pkg update -f - - pkg install -y go - test_script: - # run tests as user "cirrus" instead of root - - pw useradd cirrus -m - - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076..4cd0cbaf 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,7 +1,6 @@ -# go test -c output -*.test -*.test.exe +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global -# Output of go build ./cmd/fsnotify -/fsnotify -/fsnotify.exe +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap deleted file mode 100644 index a04f2907..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.mailmap +++ /dev/null @@ -1,2 +0,0 @@ -Chris Howey -Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 00000000..a9c30165 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - 
"1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... + +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 00000000..5ab5d41c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575..be4d7ea2 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,230 +1,6 @@ # Changelog -Unreleased ----------- -Nothing yet. 
- -1.7.0 - 2023-10-22 ------------------- -This version of fsnotify needs Go 1.17. - -### Additions - -- illumos: add FEN backend to support illumos and Solaris. ([#371]) - -- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful - in cases where you can't control the kernel buffer and receive a large number - of events in bursts. ([#550], [#572]) - -- all: add `AddWith()`, which is identical to `Add()` but allows passing - options. ([#521]) - -- windows: allow setting the ReadDirectoryChangesW() buffer size with - `fsnotify.WithBufferSize()`; the default of 64K is the highest value that - works on all platforms and is enough for most purposes, but in some cases a - highest buffer is needed. ([#521]) - -### Changes and fixes - -- inotify: remove watcher if a watched path is renamed ([#518]) - - After a rename the reported name wasn't updated, or even an empty string. - Inotify doesn't provide any good facilities to update it, so just remove the - watcher. This is already how it worked on kqueue and FEN. - - On Windows this does work, and remains working. - -- windows: don't listen for file attribute changes ([#520]) - - File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, - with no way to see if they're a file write or attribute change, so would show - up as a fsnotify.Write event. This is never useful, and could result in many - spurious Write events. - -- windows: return `ErrEventOverflow` if the buffer is full ([#525]) - - Before it would merely return "short read", making it hard to detect this - error. - -- kqueue: make sure events for all files are delivered properly when removing a - watched directory ([#526]) - - Previously they would get sent with `""` (empty string) or `"."` as the path - name. 
- -- kqueue: don't emit spurious Create events for symbolic links ([#524]) - - The link would get resolved but kqueue would "forget" it already saw the link - itself, resulting on a Create for every Write event for the directory. - -- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) - -- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in - `backend_other.go`, making it easier to use on unsupported platforms such as - WASM, AIX, etc. ([#528]) - -- other: use the `backend_other.go` no-op if the `appengine` build tag is set; - Google AppEngine forbids usage of the unsafe package so the inotify backend - won't compile there. - -[#371]: https://github.com/fsnotify/fsnotify/pull/371 -[#516]: https://github.com/fsnotify/fsnotify/pull/516 -[#518]: https://github.com/fsnotify/fsnotify/pull/518 -[#520]: https://github.com/fsnotify/fsnotify/pull/520 -[#521]: https://github.com/fsnotify/fsnotify/pull/521 -[#524]: https://github.com/fsnotify/fsnotify/pull/524 -[#525]: https://github.com/fsnotify/fsnotify/pull/525 -[#526]: https://github.com/fsnotify/fsnotify/pull/526 -[#528]: https://github.com/fsnotify/fsnotify/pull/528 -[#537]: https://github.com/fsnotify/fsnotify/pull/537 -[#550]: https://github.com/fsnotify/fsnotify/pull/550 -[#572]: https://github.com/fsnotify/fsnotify/pull/572 - -1.6.0 - 2022-10-13 ------------------- -This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, -but not documented). It also increases the minimum Linux version to 2.6.32. - -### Additions - -- all: add `Event.Has()` and `Op.Has()` ([#477]) - - This makes checking events a lot easier; for example: - - if event.Op&Write == Write && !(event.Op&Remove == Remove) { - } - - Becomes: - - if event.Has(Write) && !event.Has(Remove) { - } - -- all: add cmd/fsnotify ([#463]) - - A command-line utility for testing and some examples. 
- -### Changes and fixes - -- inotify: don't ignore events for files that don't exist ([#260], [#470]) - - Previously the inotify watcher would call `os.Lstat()` to check if a file - still exists before emitting events. - - This was inconsistent with other platforms and resulted in inconsistent event - reporting (e.g. when a file is quickly removed and re-created), and generally - a source of confusion. It was added in 2013 to fix a memory leak that no - longer exists. - -- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's - not watched ([#460]) - -- inotify: replace epoll() with non-blocking inotify ([#434]) - - Non-blocking inotify was not generally available at the time this library was - written in 2014, but now it is. As a result, the minimum Linux version is - bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. - -- kqueue: don't check for events every 100ms ([#480]) - - The watcher would wake up every 100ms, even when there was nothing to do. Now - it waits until there is something to do. - -- macos: retry opening files on EINTR ([#475]) - -- kqueue: skip unreadable files ([#479]) - - kqueue requires a file descriptor for every file in a directory; this would - fail if a file was unreadable by the current user. Now these files are simply - skipped. - -- windows: fix renaming a watched directory if the parent is also watched ([#370]) - -- windows: increase buffer size from 4K to 64K ([#485]) - -- windows: close file handle on Remove() ([#288]) - -- kqueue: put pathname in the error if watching a file fails ([#471]) - -- inotify, windows: calling Close() more than once could race ([#465]) - -- kqueue: improve Close() performance ([#233]) - -- all: various documentation additions and clarifications. 
- -[#233]: https://github.com/fsnotify/fsnotify/pull/233 -[#260]: https://github.com/fsnotify/fsnotify/pull/260 -[#288]: https://github.com/fsnotify/fsnotify/pull/288 -[#370]: https://github.com/fsnotify/fsnotify/pull/370 -[#434]: https://github.com/fsnotify/fsnotify/pull/434 -[#460]: https://github.com/fsnotify/fsnotify/pull/460 -[#463]: https://github.com/fsnotify/fsnotify/pull/463 -[#465]: https://github.com/fsnotify/fsnotify/pull/465 -[#470]: https://github.com/fsnotify/fsnotify/pull/470 -[#471]: https://github.com/fsnotify/fsnotify/pull/471 -[#475]: https://github.com/fsnotify/fsnotify/pull/475 -[#477]: https://github.com/fsnotify/fsnotify/pull/477 -[#479]: https://github.com/fsnotify/fsnotify/pull/479 -[#480]: https://github.com/fsnotify/fsnotify/pull/480 -[#485]: https://github.com/fsnotify/fsnotify/pull/485 - -## [1.5.4] - 2022-04-25 - -* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) -* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) -* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) - -## [1.5.3] - 2022-04-22 - -* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) - -## [1.5.2] - 2022-04-21 - -* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) -* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) -* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) -* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) -* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) - -## [1.5.1] - 2021-08-24 - -* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) - -## [1.5.0] - 2021-08-20 - -* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) -* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) -* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) -* CI: Use GitHub Actions for CI and cover go 1.12-1.17 - [#378](https://github.com/fsnotify/fsnotify/pull/378) - [#381](https://github.com/fsnotify/fsnotify/pull/381) - [#385](https://github.com/fsnotify/fsnotify/pull/385) -* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) - -## [1.4.9] - 2020-03-11 - -* Move example usage to the readme #329. This may resolve #328. 
- -## [1.4.8] - 2020-03-10 - -* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) -* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) -* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) -* CI: Less verbosity (@nathany #267) -* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) -* Tests: Check if channels are closed in the example (@alexeykazakov #244) -* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) -* CI: Add windows to travis matrix (@cpuguy83 #284) -* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) -* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) -* Linux: open files with close-on-exec (@linxiulei #273) -* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) -* Project: Add go.mod (@nathany #309) -* Project: Revise editor config (@nathany #309) -* Project: Update copyright for 2019 (@nathany #309) -* CI: Drop go1.8 from CI matrix (@nathany #309) -* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) - -## [1.4.7] - 2018-01-09 +## v1.4.7 / 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) * Tests: Fix missing verb on format string (thanks @rchiossi) @@ -234,62 +10,62 @@ but not documented). It also increases the minimum Linux version to 2.6.32. 
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) * Docs: replace references to OS X with macOS -## [1.4.2] - 2016-10-10 +## v1.4.2 / 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) -## [1.4.1] - 2016-10-04 +## v1.4.1 / 2016-10-04 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) -## [1.4.0] - 2016-10-01 +## v1.4.0 / 2016-10-01 * add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) -## [1.3.1] - 2016-06-28 +## v1.3.1 / 2016-06-28 * Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) -## [1.3.0] - 2016-04-19 +## v1.3.0 / 2016-04-19 * Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) -## [1.2.10] - 2016-03-02 +## v1.2.10 / 2016-03-02 * Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) -## [1.2.9] - 2016-01-13 +## v1.2.9 / 2016-01-13 kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) -## [1.2.8] - 2015-12-17 +## v1.2.8 / 2015-12-17 * kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) * inotify: fix race in test * enable race detection for continuous integration (Linux, Mac, Windows) -## [1.2.5] - 2015-10-17 +## v1.2.5 / 2015-10-17 * inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) * inotify: fix path leaks 
[#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) * kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) * kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) -## [1.2.1] - 2015-10-14 +## v1.2.1 / 2015-10-14 * kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) -## [1.2.0] - 2015-02-08 +## v1.2.0 / 2015-02-08 * inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) * inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) * kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) -## [1.1.1] - 2015-02-05 +## v1.1.1 / 2015-02-05 * inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) -## [1.1.0] - 2014-12-12 +## v1.1.0 / 2014-12-12 * kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) * add low-level functions @@ -301,22 +77,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## [1.0.4] - 2014-09-07 +## v1.0.4 / 2014-09-07 * kqueue: add dragonfly to the build tags. * Rename source code files, rearrange code so exported APIs are at the top. * Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) -## [1.0.3] - 2014-08-19 +## v1.0.3 / 2014-08-19 * [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. 
[#36](https://github.com/fsnotify/fsnotify/issues/36) -## [1.0.2] - 2014-08-17 +## v1.0.2 / 2014-08-17 * [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) -## [1.0.0] - 2014-08-15 +## v1.0.0 / 2014-08-15 * [API] Remove AddWatch on Windows, use Add. * Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) @@ -370,51 +146,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * no tests for the current implementation * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) -## [0.9.3] - 2014-12-31 +## v0.9.3 / 2014-12-31 * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## [0.9.2] - 2014-08-17 +## v0.9.2 / 2014-08-17 * [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -## [0.9.1] - 2014-06-12 +## v0.9.1 / 2014-06-12 * Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) -## [0.9.0] - 2014-01-17 +## v0.9.0 / 2014-01-17 * IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) * [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) * [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-## [0.8.12] - 2013-11-13 +## v0.8.12 / 2013-11-13 * [API] Remove FD_SET and friends from Linux adapter -## [0.8.11] - 2013-11-02 +## v0.8.11 / 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) * [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) -## [0.8.10] - 2013-10-19 +## v0.8.10 / 2013-10-19 * [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) * [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) * [Doc] specify OS-specific limits in README (thanks @debrando) -## [0.8.9] - 2013-09-08 +## v0.8.9 / 2013-09-08 * [Doc] Contributing (thanks @nathany) * [Doc] update package path in example code [#63][] (thanks @paulhammond) * [Doc] GoCI badge in README (Linux only) [#60][] * [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) -## [0.8.8] - 2013-06-17 +## v0.8.8 / 2013-06-17 * [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) -## [0.8.7] - 2013-06-03 +## v0.8.7 / 2013-06-03 * [API] Make syscall flags internal * [Fix] inotify: ignore event changes @@ -422,74 +198,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] tests on Windows * lower case error messages -## [0.8.6] - 2013-05-23 +## v0.8.6 / 2013-05-23 * kqueue: Use EVT_ONLY flag on Darwin * [Doc] Update README with full example -## [0.8.5] - 2013-05-09 +## v0.8.5 / 2013-05-09 * [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) -## [0.8.4] - 2013-04-07 +## v0.8.4 / 2013-04-07 * [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) -## [0.8.3] - 2013-03-13 +## v0.8.3 / 2013-03-13 * [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) * [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) -## [0.8.2] - 2013-02-07 +## v0.8.2 / 2013-02-07 * [Doc] add Authors * [Fix] fix data races for map access [#29][] (thanks @fsouza) -## [0.8.1] - 
2013-01-09 +## v0.8.1 / 2013-01-09 * [Fix] Windows path separators * [Doc] BSD License -## [0.8.0] - 2012-11-09 +## v0.8.0 / 2012-11-09 * kqueue: directory watching improvements (thanks @vmirage) * inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) * [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) -## [0.7.4] - 2012-10-09 +## v0.7.4 / 2012-10-09 * [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) * [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) * [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) * [Fix] kqueue: modify after recreation of file -## [0.7.3] - 2012-09-27 +## v0.7.3 / 2012-09-27 * [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) * [Fix] kqueue: no longer get duplicate CREATE events -## [0.7.2] - 2012-09-01 +## v0.7.2 / 2012-09-01 * kqueue: events for created directories -## [0.7.1] - 2012-07-14 +## v0.7.1 / 2012-07-14 * [Fix] for renaming files -## [0.7.0] - 2012-07-02 +## v0.7.0 / 2012-07-02 * [Feature] FSNotify flags * [Fix] inotify: Added file name back to event path -## [0.6.0] - 2012-06-06 +## v0.6.0 / 2012-06-06 * kqueue: watch files after directory created (thanks @tmc) -## [0.5.1] - 2012-05-22 +## v0.5.1 / 2012-05-22 * [Fix] inotify: remove all watches before Close() -## [0.5.0] - 2012-05-03 +## v0.5.0 / 2012-05-03 * [API] kqueue: return errors during watch instead of sending over channel * kqueue: match symlink behavior on Linux @@ -497,22 +273,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] kqueue: handle EINTR (reported by @robfig) * [Doc] Godoc example [#1][] (thanks @davecheney) -## [0.4.0] - 2012-03-30 +## v0.4.0 / 2012-03-30 * Go 1 released: build with go tool * [Feature] Windows support using winfsnotify * Windows does not have attribute change notifications * Roll attribute notifications into IsModify -## [0.3.0] - 2012-02-19 
+## v0.3.0 / 2012-02-19 * kqueue: add files when watch directory -## [0.2.0] - 2011-12-30 +## v0.2.0 / 2011-12-30 * update to latest Go weekly code -## [0.1.0] - 2011-10-19 +## v0.1.0 / 2011-10-19 * kqueue: add watch on file creation to match inotify * kqueue: create file event diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759..828a60b2 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,26 +1,77 @@ -Thank you for your interest in contributing to fsnotify! We try to review and -merge PRs in a reasonable timeframe, but please be aware that: +# Contributing -- To avoid "wasted" work, please discus changes on the issue tracker first. You - can just send PRs, but they may end up being rejected for one reason or the - other. +## Issues -- fsnotify is a cross-platform library, and changes must work reasonably well on - all supported platforms. +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. -- Changes will need to be compatible; old code should still compile, and the - runtime behaviour can't change in ways that are likely to lead to problems for - users. +## Pull Requests -Testing -------- -Just `go test ./...` runs all the tests; the CI runs this on all supported -platforms. Testing different platforms locally can be done with something like -[goon] or [Vagrant], but this isn't super-easy to set up at the moment. +### Contributor License Agreement -Use the `-short` flag to make the "stress test" run faster. +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. 
Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Please indicate that you have signed the CLA in your pull request. -[goon]: https://github.com/arp242/goon -[Vagrant]: https://www.vagrantup.com/ -[integration_test.go]: /integration_test.go +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. 
+ +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index fb03ade7..e180c8fb 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,25 +1,28 @@ -Copyright © 2012 The Go Authors. All rights reserved. 
-Copyright © fsnotify Authors. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. -* Neither the name of Google Inc. nor the names of its contributors may be used - to endorse or promote products derived from this software without specific - prior written permission. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index e480733d..b2629e52 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,184 +1,130 @@ -fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, BSD, and illumos. 
+# File system notifications for Go -Go 1.17 or newer is required; the full documentation is at -https://pkg.go.dev/github.com/fsnotify/fsnotify +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) ---- +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: -Platform support: +```console +go get -u golang.org/x/sys/... +``` -| Backend | OS | Status | -| :-------------------- | :--------- | :------------------------------------------------------------------------ | -| inotify | Linux | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FEN | illumos | Supported | -| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | -| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | -| USN Journals | Windows | [Needs support in x/sys/windows][usn] | -| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | +Cross platform: Windows, Linux, BSD and macOS. -Linux and illumos should include Android and Solaris, but these are currently -untested. 
+| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 -[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 +\* Android and iOS are untested. -Usage ------ -A basic example: +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). 
Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Usage ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - // Create new watcher. - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() - // Start listening for events. - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Has(fsnotify.Write) { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() - // Add a path. - err = watcher.Add("/tmp") - if err != nil { - log.Fatal(err) - } - - // Block main goroutine forever. - <-make(chan struct{}) + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done } ``` -Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be -run with: +## Contributing - % go run ./cmd/fsnotify +Please refer to [CONTRIBUTING][] before opening an issue or pull request. 
-Further detailed documentation can be found in godoc: -https://pkg.go.dev/github.com/fsnotify/fsnotify +## Example -FAQ ---- -### Will a file still be watched when it's moved to another directory? -No, not unless you are watching the location it was moved to. +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). -### Are subdirectories watched? -No, you must add watches for any directory you want to watch (a recursive -watcher is on the roadmap: [#18]). +## FAQ +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
+ +[#62]: https://github.com/howeyc/fsnotify/issues/62 [#18]: https://github.com/fsnotify/fsnotify/issues/18 - -### Do I have to watch the Error and Event channels in a goroutine? -Yes. You can read both channels in the same goroutine using `select` (you don't -need a separate goroutine for both channels; see the example). - -### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? -fsnotify requires support from underlying OS to work. The current NFS and SMB -protocols does not provide network level support for file notifications, and -neither do the /proc and /sys virtual filesystems. - -This could be fixed with a polling watcher ([#9]), but it's not yet implemented. - -[#9]: https://github.com/fsnotify/fsnotify/issues/9 - -### Why do I get many Chmod events? -Some programs may generate a lot of attribute changes; for example Spotlight on -macOS, anti-virus programs, backup applications, and some others are known to do -this. As a rule, it's typically best to ignore Chmod events. They're often not -useful, and tend to cause problems. - -Spotlight indexing on macOS can result in multiple events (see [#15]). A -temporary workaround is to add your folder(s) to the *Spotlight Privacy -settings* until we have a native FSEvents implementation (see [#11]). - [#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#15]: https://github.com/fsnotify/fsnotify/issues/15 +[#7]: https://github.com/howeyc/fsnotify/issues/7 -### Watching a file doesn't work well -Watching individual files (rather than directories) is generally not recommended -as many programs (especially editors) update files atomically: it will write to -a temporary file which is then moved to to destination, overwriting the original -(or some variant thereof). The watcher on the original file is now lost, as that -no longer exists. 
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md -The upshot of this is that a power failure or crash won't leave a half-written -file. +## Related Projects -Watch the parent directory and use `Event.Name` to filter out files you're not -interested in. There is an example of this in `cmd/fsnotify/file.go`. +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) -Platform-specific notes ------------------------ -### Linux -When a file is removed a REMOVE event won't be emitted until all file -descriptors are closed; it will emit a CHMOD instead: - - fp := os.Open("file") - os.Remove("file") // CHMOD - fp.Close() // REMOVE - -This is the event that inotify sends, so not much can be changed about this. - -The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for -the number of watches per user, and `fs.inotify.max_user_instances` specifies -the maximum number of inotify instances per user. Every Watcher you create is an -"instance", and every path you add is a "watch". - -These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and -`/proc/sys/fs/inotify/max_user_instances` - -To increase them you can use `sysctl` or write the value to proc file: - - # The default values on Linux 5.18 - sysctl fs.inotify.max_user_watches=124983 - sysctl fs.inotify.max_user_instances=128 - -To make the changes persist on reboot edit `/etc/sysctl.conf` or -`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your -distro's documentation): - - fs.inotify.max_user_watches=124983 - fs.inotify.max_user_instances=128 - -Reaching the limit will result in a "no space left on device" or "too many open -files" error. - -### kqueue (macOS, all BSD systems) -kqueue requires opening a file descriptor for every file that's being watched; -so if you're watching a directory with five files then that's six file -descriptors. 
You will run in to your system's "max open files" limit faster on -these platforms. - -The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to -control the maximum number of open files. diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go deleted file mode 100644 index 28497f1d..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ /dev/null @@ -1,640 +0,0 @@ -//go:build solaris -// +build solaris - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
- Errors chan error - - mu sync.Mutex - port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), - done: make(chan struct{}), - } - - var err error - w.port, err = unix.NewEventPort() - if err != nil { - return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) - } - - go w.readEvents() - return w, nil -} - -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { - select { - case w.Events <- Event{Name: name, Op: op}: - return true - case <-w.done: - return false - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { - return nil - } - close(w.done) - return w.port.Close() -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. 
-// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - if w.port.PathIsWatched(name) { - return nil - } - - _ = getOptions(opts...) - - // Currently we resolve symlinks that were explicitly requested to be - // watched. Otherwise we would use LStat here. - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Associate all files in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, true, w.associateFile) - if err != nil { - return err - } - - w.mu.Lock() - w.dirs[name] = struct{}{} - w.mu.Unlock() - return nil - } - - err = w.associateFile(name, stat, true) - if err != nil { - return err - } - - w.mu.Lock() - w.watches[name] = struct{}{} - w.mu.Unlock() - return nil -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - if !w.port.PathIsWatched(name) { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - // The user has expressed an intent. Immediately remove this name from - // whichever watch list it might be in. If it's not in there the delete - // doesn't cause harm. 
- w.mu.Lock() - delete(w.watches, name) - delete(w.dirs, name) - w.mu.Unlock() - - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Remove associations for every file in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, false, w.dissociateFile) - if err != nil { - return err - } - return nil - } - - err = w.port.DissociatePath(name) - if err != nil { - return err - } - - return nil -} - -// readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { - // If this function returns, the watcher has been closed and we can close - // these channels - defer func() { - close(w.Errors) - close(w.Events) - }() - - pevents := make([]unix.PortEvent, 8) - for { - count, err := w.port.Get(pevents, 1, nil) - if err != nil && err != unix.ETIME { - // Interrupted system call (count should be 0) ignore and continue - if errors.Is(err, unix.EINTR) && count == 0 { - continue - } - // Get failed because we called w.Close() - if errors.Is(err, unix.EBADF) && w.isClosed() { - return - } - // There was an error not caused by calling w.Close() - if !w.sendError(err) { - return - } - } - - p := pevents[:count] - for _, pevent := range p { - if pevent.Source != unix.PORT_SOURCE_FILE { - // Event from unexpected source received; should never happen. - if !w.sendError(errors.New("Event from unexpected source received")) { - return - } - continue - } - - err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } - } - } - } -} - -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { - files, err := os.ReadDir(path) - if err != nil { - return err - } - - // Handle all children of the directory. 
- for _, entry := range files { - finfo, err := entry.Info() - if err != nil { - return err - } - err = handler(filepath.Join(path, finfo.Name()), finfo, false) - if err != nil { - return err - } - } - - // And finally handle the directory itself. - return handler(path, stat, follow) -} - -// handleEvent might need to emit more than one fsnotify event if the events -// bitmap matches more than one event type (e.g. the file was both modified and -// had the attributes changed between when the association was created and the -// when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { - var ( - events = event.Events - path = event.Path - fmode = event.Cookie.(os.FileMode) - reRegister = true - ) - - w.mu.Lock() - _, watchedDir := w.dirs[path] - _, watchedPath := w.watches[path] - w.mu.Unlock() - isWatched := watchedDir || watchedPath - - if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { - return nil - } - reRegister = false - } - if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { - return nil - } - // Don't keep watching the new file name - reRegister = false - } - if events&unix.FILE_RENAME_TO != 0 { - // We don't report a Rename event for this case, because Rename events - // are interpreted as referring to the _old_ name of the file, and in - // this case the event would refer to the new name of the file. This - // type of rename event is not supported by fsnotify. - - // inotify reports a Remove event in this case, so we simulate this - // here. - if !w.sendEvent(path, Remove) { - return nil - } - // Don't keep watching the file that was removed - reRegister = false - } - - // The file is gone, nothing left to do. 
- if !reRegister { - if watchedDir { - w.mu.Lock() - delete(w.dirs, path) - w.mu.Unlock() - } - if watchedPath { - w.mu.Lock() - delete(w.watches, path) - w.mu.Unlock() - } - return nil - } - - // If we didn't get a deletion the file still exists and we're going to have - // to watch it again. Let's Stat it now so that we can compare permissions - // and have what we need to continue watching the file - - stat, err := os.Lstat(path) - if err != nil { - // This is unexpected, but we should still emit an event. This happens - // most often on "rm -r" of a subdirectory inside a watched directory We - // get a modify event of something happening inside, but by the time we - // get here, the sudirectory is already gone. Clearly we were watching - // this path but now it is gone. Let's tell the user that it was - // removed. - if !w.sendEvent(path, Remove) { - return nil - } - // Suppress extra write events on removed directories; they are not - // informative and can be confusing. - return nil - } - - // resolve symlinks that were explicitly watched as we would have at Add() - // time. this helps suppress spurious Chmod events on watched symlinks - if isWatched { - stat, err = os.Stat(path) - if err != nil { - // The symlink still exists, but the target is gone. Report the - // Remove similar to above. 
- if !w.sendEvent(path, Remove) { - return nil - } - // Don't return the error - } - } - - if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } - } - } else { - if !w.sendEvent(path, Write) { - return nil - } - } - } - if events&unix.FILE_ATTRIB != 0 && stat != nil { - // Only send Chmod if perms changed - if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { - return nil - } - } - } - - if stat != nil { - // If we get here, it means we've hit an event above that requires us to - // continue watching the file or directory - return w.associateFile(path, stat, isWatched) - } - return nil -} - -func (w *Watcher) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. If something was removed from the directory, nothing will happen, - // as everything else should still be watched. - files, err := os.ReadDir(path) - if err != nil { - return err - } - - for _, entry := range files { - path := filepath.Join(path, entry.Name()) - if w.port.PathIsWatched(path) { - continue - } - - finfo, err := entry.Info() - if err != nil { - return err - } - err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } - } - if !w.sendEvent(path, Create) { - return nil - } - } - return nil -} - -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { - if w.isClosed() { - return ErrClosed - } - // This is primarily protecting the call to AssociatePath but it is - // important and intentional that the call to PathIsWatched is also - // protected by this mutex. Without this mutex, AssociatePath has been seen - // to error out that the path is already associated. 
- w.mu.Lock() - defer w.mu.Unlock() - - if w.port.PathIsWatched(path) { - // Remove the old association in favor of this one If we get ENOENT, - // then while the x/sys/unix wrapper still thought that this path was - // associated, the underlying event port did not. This call will have - // cleared up that discrepancy. The most likely cause is that the event - // has fired but we haven't processed it yet. - err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { - return err - } - } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB - } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) -} - -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { - if !w.port.PathIsWatched(path) { - return nil - } - return w.port.DissociatePath(path) -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)+len(w.dirs)) - for pathname := range w.dirs { - entries = append(entries, pathname) - } - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go deleted file mode 100644 index 921c1c1e..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ /dev/null @@ -1,594 +0,0 @@ -//go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
- Errors chan error - - // Store fd here as os.File.Read() will no longer return on close after - // calling Fd(). See: https://github.com/golang/go/issues/26439 - fd int - inotifyFile *os.File - watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex - doneResp chan struct{} // Channel to respond to Close -} - -type ( - watches struct { - mu sync.RWMutex - wd map[uint32]*watch // wd → watch - path map[string]uint32 // pathname → wd - } - watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. - } -) - -func newWatches() *watches { - return &watches{ - wd: make(map[uint32]*watch), - path: make(map[string]uint32), - } -} - -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) - delete(w.wd, wd) -} - -func (w *watches) removePath(path string) (uint32, bool) { - w.mu.Lock() - defer w.mu.Unlock() - - wd, ok := w.path[path] - if !ok { - return 0, false - } - - delete(w.path, path) - delete(w.wd, wd) - - return wd, true -} - -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - -func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - - var existing *watch - wd, ok := w.path[path] - if ok { - existing = w.wd[wd] - } - - upd, err := f(existing) - if err != nil { - return err - } - if upd != nil { - w.wd[upd.wd] = upd - w.path[upd.path] = upd.wd - - if upd.wd 
!= wd { - delete(w.wd, wd) - } - } - - return nil -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - // Need to set nonblocking mode for SetDeadline to work, otherwise blocking - // I/O operations won't terminate on close. - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) - if fd == -1 { - return nil, errno - } - - w := &Watcher{ - fd: fd, - inotifyFile: os.NewFile(uintptr(fd), ""), - watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.closeMu.Lock() - if w.isClosed() { - w.closeMu.Unlock() - return nil - } - close(w.done) - w.closeMu.Unlock() - - // Causes any blocking reads to return with an error, provided the file - // still supports deadline operations. - err := w.inotifyFile.Close() - if err != nil { - return err - } - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. 
-func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - - name = filepath.Clean(name) - _ = getOptions(opts...) - - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { - if existing != nil { - flags |= existing.flags | unix.IN_MASK_ADD - } - - wd, err := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return nil, err - } - - if existing == nil { - return &watch{ - wd: uint32(wd), - path: name, - flags: flags, - }, nil - } - - existing.wd = uint32(wd) - existing.flags = flags - return existing, nil - }) -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - return w.remove(filepath.Clean(name)) -} - -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno - } - return nil -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() - for pathname := range w.watches.path { - entries = append(entries, pathname) - } - w.watches.mu.RUnlock() - - return entries -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - defer func() { - close(w.doneResp) - close(w.Errors) - close(w.Events) - }() - - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) - for { - // See if we have been closed. 
- if w.isClosed() { - return - } - - n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: - if !w.sendError(err) { - return - } - continue - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. - } - if !w.sendError(err) { - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { - if !w.sendError(ErrEventOverflow) { - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - watch := w.watches.byWd(uint32(raw.Wd)) - - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. 
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } - } - - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := w.newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go deleted file mode 100644 index 063a0915..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ /dev/null @@ -1,782 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// 
mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. 
-// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. 
-// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - kq, closepipe, err := newKqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// newKqueue creates a new kernel event queue and returns a descriptor. -// -// This registers a new event on closepipe, which will trigger an event when -// it's closed. This way we can use kevent() without timeout/polling; without -// the closepipe, it would block forever and we wouldn't be able to stop it at -// all. -func newKqueue() (kq int, closepipe [2]int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, closepipe, err - } - - // Register the close pipe. - err = unix.Pipe(closepipe[:]) - if err != nil { - unix.Close(kq) - return kq, closepipe, err - } - - // Register changes to listen on the closepipe. - changes := make([]unix.Kevent_t, 1) - // SetKevent converts int to the platform-specific types. 
- unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, - unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) - - ok, err := unix.Kevent(kq, changes, nil, nil) - if ok == -1 { - unix.Close(kq) - unix.Close(closepipe[0]) - unix.Close(closepipe[1]) - return kq, closepipe, err - } - return kq, closepipe, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks - for _, name := range pathsToRemove { - w.Remove(name) - } - - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) - close(w.done) - - return nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. 
-// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) - - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { - return w.remove(name, true) -} - -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) - if err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } - - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error to - // the user, as that will just confuse them with an error about a - // path they did not explicitly watch themselves. - w.Remove(name) - } - } - return nil -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { - return nil - } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// addWatch adds name to the watched file set; the flags are interpreted as -// described in kevent(2). -// -// Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", ErrClosed - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets or named pipes - if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { - return "", nil - } - - // Follow Symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - link, err := os.Readlink(name) - if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - - if alreadyWatching { - // Add to watches so we don't get spurious Create events later - // on when we diff the directories. 
- w.watches[name] = 0 - w.fileExists[name] = struct{}{} - return link, nil - } - - name = link - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - // Retry on EINTR; open() can return EINTR in practice on macOS. - // See #354, and Go issues 11180 and 39237. - for { - watchfd, err = unix.Open(name, openMode, 0) - if err == nil { - break - } - if errors.Is(err, unix.EINTR) { - continue - } - - return "", err - } - - isDir = fi.IsDir() - } - - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) - if err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - defer func() { - close(w.Events) - close(w.Errors) - _ = unix.Close(w.kq) - unix.Close(w.closepipe[0]) - }() - - eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { - kevents, err := w.read(eventBuffer) - // EINTR is okay, the syscall was interrupted before timeout expired. 
- if err != nil && err != unix.EINTR { - if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true - } - continue - } - - // Flush the events we received to the Events channel - for _, kevent := range kevents { - var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) - ) - - // Shut down the loop when the pipe is closed, but only after all - // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue - } - - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - - event := w.newEvent(path.name, mask) - - if event.Has(Rename) || event.Has(Remove) { - w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } - } - - if event.Has(Remove) { - // Look for a file that may have overwritten this; for example, - // mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } - } - } - } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } - } - } - } - } - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. 
-func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - // No point sending a write and delete event at the same time: if it's gone, - // then it's gone. - if e.Op.Has(Write) && e.Op.Has(Remove) { - e.Op &^= Write - } - return e -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := os.ReadDir(dirPath) - if err != nil { - return err - } - - for _, f := range files { - path := filepath.Join(dirPath, f.Name()) - - fi, err := f.Info() - if err != nil { - return fmt.Errorf("%q: %w", path, err) - } - - cleanPath, err := w.internalWatch(path, fi) - if err != nil { - // No permission to read the file; that's not a problem: just skip. - // But do add it to w.fileExists to prevent it from being picked up - // as a "new" file later (it still shows up in the directory - // listing). - switch { - case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): - cleanPath = filepath.Clean(path) - default: - return fmt.Errorf("%q: %w", path, err) - } - } - - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() - } - - return nil -} - -// Search the directory for new files and send an event for them. -// -// This functionality is to have the BSD watcher match the inotify, which sends -// a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { - files, err := os.ReadDir(dir) - if err != nil { - // Directory no longer exists: we can ignore this safely. kqueue will - // still give us the correct events. 
- if errors.Is(err, os.ErrNotExist) { - return nil - } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - - for _, f := range files { - fi, err := f.Info() - if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) - if err != nil { - // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { - return nil - } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - } - return nil -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { - if fi.IsDir() { - // mimic Linux providing delete events for subdirectories, but preserve - // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - for i, fd := range fds { - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // Register the events. 
- success, err := unix.Kevent(w.kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(w.kq, nil, events, nil) - if err != nil { - return nil, err - } - return events[0:n], nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go deleted file mode 100644 index d34a23c0..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ /dev/null @@ -1,205 +0,0 @@ -//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import "errors" - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error -} - -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return nil, errors.New("fsnotify not supported on the current platform") -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go deleted file mode 100644 index 9bc91e5d..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ /dev/null @@ -1,827 +0,0 @@ -//go:build windows -// +build windows - -// Windows backend based on ReadDirectoryChangesW() -// -// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
- Errors chan error - - port windows.Handle // Handle to completion port - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error - - mu sync.Mutex // Protects access to watches, closed - watches watchMap // Map of watches (key: i-number) - closed bool // Set to true when Close() is first called -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) - if err != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", err) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - w.mu.Lock() - defer w.mu.Unlock() - return w.closed -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - - event := w.newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.quit: - } - return false -} - -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - w.mu.Lock() - w.closed = true - w.mu.Unlock() - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. 
When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - - with := getOptions(opts...) - if with.bufsize < 4096 { - return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") - } - - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - bufsize: with.bufsize, - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -// These options are from the old golang.org/x/exp/winfsnotify, where you could -// add various options to the watch. This has long since been removed. 
-// -// The "sys" in the name is misleading as they're not part of any "system". -// -// This should all be removed at some point, and just use windows.FILE_NOTIFY_* -const ( - sysFSALLEVENTS = 0xfff - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - sysFSIGNORED = 0x8000 -) - -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - bufsize int - reply chan error -} - -type inode struct { - handle windows.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov windows.Overlapped - ino *inode // i-number - recurse bool // Recursive watch? 
- path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf []byte // buffer, allocated later -} - -type ( - indexMap map[uint64]*watch - watchMap map[uint32]indexMap -) - -func (w *Watcher) wakeupReader() error { - err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if err != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", err) - } - return nil -} - -func (w *Watcher) getDir(pathname string) (dir string, err error) { - attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) - if err != nil { - return "", os.NewSyscallError("GetFileAttributes", err) - } - if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func (w *Watcher) getIno(path string) (ino *inode, err error) { - h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), - windows.FILE_LIST_DIRECTORY, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, - nil, windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, os.NewSyscallError("CreateFile", err) - } - - var fi windows.ByHandleFileInformation - err = windows.GetFileInformationByHandle(h, &fi) - if err != nil { - windows.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", err) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false - - dir, err := w.getDir(pathname) - if err != nil { - return err - } - - ino, err := w.getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) - if err != nil { - windows.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", err) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - recurse: recurse, - buf: make([]byte, bufsize), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - windows.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - - err = w.startRead(watchEntry) - if err != nil { - return err - } - - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - pathname, recurse := recursivePath(pathname) - - dir, err := w.getDir(pathname) - if err != nil { - return err - } - ino, err := w.getIno(dir) - if err != nil { - return err - } - - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - - if recurse && !watch.recurse { - return fmt.Errorf("can't use \\... 
with non-recursive watch %q", pathname) - } - - err = windows.CloseHandle(ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - if watch == nil { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { - err := windows.CancelIo(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CancelIo", err)) - w.deleteWatch(watch) - } - mask := w.toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= w.toWindowsFlags(m) - } - if mask == 0 { - err := windows.CloseHandle(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - - // We need to pass the array, rather than the slice. 
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, - (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), - watch.recurse, mask, nil, &watch.ov, 0) - if rdErr != nil { - err := os.NewSyscallError("ReadDirectoryChanges", rdErr) - if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n uint32 - key uintptr - ov *windows.Overlapped - ) - runtime.LockOSThread() - - for { - // This error is handled after the watch == nil check below. - qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - - watch := (*watch)(unsafe.Pointer(ov)) - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - - err := windows.CloseHandle(w.port) - if err != nil { - err = os.NewSyscallError("CloseHandle", err) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch qErr { - case nil: - // No error - case windows.ERROR_MORE_DATA: - if watch == nil { - w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) - } else { - // The i/o succeeded but the buffer is full. 
- // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. - n = uint32(unsafe.Sizeof(watch.buf)) - } - case windows.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case windows.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) - continue - } - - var offset uint32 - for { - if n == 0 { - w.sendError(ErrEventOverflow) - break - } - - // Point "raw" to the event in the buffer - raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - - // Create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := windows.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case windows.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case windows.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case windows.FILE_ACTION_RENAMED_NEW_NAME: - // Update saved path of all sub-watches. 
- old := filepath.Join(watch.path, watch.rename) - w.mu.Lock() - for _, watchMap := range w.watches { - for _, ww := range watchMap { - if strings.HasPrefix(ww.path, old) { - ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) - } - } - } - w.mu.Unlock() - - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } - if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) - if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) - break - } - } - - if err := w.startRead(watch); err != nil { - w.sendError(err) - } - } -} - -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSMODIFY != 0 { - m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { - switch action { - case windows.FILE_ACTION_ADDED: - return sysFSCREATE - case windows.FILE_ACTION_REMOVED: - return sysFSDELETE - case windows.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case windows.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000..ced39cb8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
+func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4..89cab046 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,146 +1,68 @@ -// Package fsnotify provides a cross-platform interface for file system -// notifications. -// -// Currently supported systems: -// -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. package fsnotify import ( + "bytes" "errors" "fmt" - "path/filepath" - "strings" ) -// Event represents a file system notification. +// Event represents a single file system notification. type Event struct { - // Path to the file or directory. - // - // Paths are relative to the input; for example with Add("dir") the Name - // will be set to "dir/file" if you create that file, but if you use - // Add("/path/to/dir") it will be "/path/to/dir/file". - Name string - - // File operation that triggered the event. - // - // This is a bitmask and some systems may send multiple operations at once. - // Use the Event.Has() method instead of comparing with ==. - Op Op + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. } // Op describes a set of file operations. type Op uint32 -// The operations fsnotify can trigger; see the documentation on [Watcher] for a -// full description, and check them with [Event.Has]. 
+// These are the generalized file operations that can trigger a notification. const ( - // A new pathname was created. Create Op = 1 << iota - - // The pathname was written to; this does *not* mean the write has finished, - // and a write can be followed by more writes. Write - - // The path was removed; any watches on it will be removed. Some "remove" - // operations may trigger a Rename if the file is actually moved (for - // example "remove to trash" is often a rename). Remove - - // The path was renamed to something else; any watched on it will be - // removed. Rename - - // File attributes were changed. - // - // It's generally not recommended to take action on this event, as it may - // get triggered very frequently by some software. For example, Spotlight - // indexing on macOS, anti-virus software, backup software, etc. Chmod ) -// Common errors that can be reported. -var ( - ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") -) +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer -func (o Op) String() string { - var b strings.Builder - if o.Has(Create) { - b.WriteString("|CREATE") + if op&Create == Create { + buffer.WriteString("|CREATE") } - if o.Has(Remove) { - b.WriteString("|REMOVE") + if op&Remove == Remove { + buffer.WriteString("|REMOVE") } - if o.Has(Write) { - b.WriteString("|WRITE") + if op&Write == Write { + buffer.WriteString("|WRITE") } - if o.Has(Rename) { - b.WriteString("|RENAME") + if op&Rename == Rename { + buffer.WriteString("|RENAME") } - if o.Has(Chmod) { - b.WriteString("|CHMOD") + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") } - if b.Len() == 0 { - return "[no events]" + if buffer.Len() == 0 { + return "" } - return b.String()[1:] + return buffer.String()[1:] // Strip leading pipe } -// Has reports if this 
operation has the given operation. -func (o Op) Has(h Op) bool { return o&h != 0 } - -// Has reports if this event has the given operation. -func (e Event) Has(op Op) bool { return e.Op.Has(op) } - -// String returns a string representation of the event with their path. +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." func (e Event) String() string { - return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) } -type ( - addOpt func(opt *withOpts) - withOpts struct { - bufsize int - } +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") ) - -var defaultOpts = withOpts{ - bufsize: 65536, // 64K -} - -func getOptions(opts ...addOpt) withOpts { - with := defaultOpts - for _, o := range opts { - o(&with) - } - return with -} - -// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. -// -// This only has effect on Windows systems, and is a no-op for other backends. -// -// The default value is 64K (65536 bytes) which is the highest value that works -// on all filesystems and should be enough for most applications, but if you -// have a large burst of events it may not be enough. You can increase it if -// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). -// -// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -func WithBufferSize(bytes int) addOpt { - return func(opt *withOpts) { opt.bufsize = bytes } -} - -// Check if this path is recursive (ends with "/..." or "\..."), and return the -// path with the /... stripped. -func recursivePath(path string) (string, bool) { - if filepath.Base(path) == "..." 
{ - return filepath.Dir(path), true - } - return path, false -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 00000000..d9fd1b88 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. 
readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. 
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. 
+		if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+			delete(w.paths, int(raw.Wd))
+			delete(w.watches, name)
+		}
+		w.mu.Unlock()
+
+		if nameLen > 0 {
+			// Point "bytes" at the first byte of the filename
+			bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+			// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+			name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+		}
+
+		event := newEvent(name, mask)
+
+		// Send the events that are not ignored on the events channel
+		if !event.ignoreLinux(mask) {
+			select {
+			case w.Events <- event:
+			case <-w.done:
+				return
+			}
+		}
+
+		// Move to the next event in the buffer
+		offset += unix.SizeofInotifyEvent + nameLen
+	}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000..b33f2b4d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. 
+ epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 00000000..86e76a3d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. 
+ w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. 
+ if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. 
+ if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. 
+func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6..00000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go similarity index 50% rename from vendor/github.com/fsnotify/fsnotify/system_bsd.go rename to vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 4322b0b8..2306c462 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -1,4 +1,7 @@ -//go:build freebsd || openbsd || netbsd || dragonfly +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go similarity index 50% rename from vendor/github.com/fsnotify/fsnotify/system_darwin.go rename to vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 5da5ffa7..870c4d6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -1,4 +1,7 @@ -//go:build darwin +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ // +build darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 00000000..09436f31 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). 
+func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // 
Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. 
+func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if 
raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc..00000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - 
golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 0108f1d5..813788af 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,10 +1,6 @@ -# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) +# gojsonpointer [![Build 
Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d970c7cf..7df9853d 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,7 +26,6 @@ package jsonpointer import ( - "encoding/json" "errors" "fmt" "reflect" @@ -41,7 +40,6 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator - notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -50,13 +48,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (any, error) + JSONLookup(string) (interface{}, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, any) error + JSONSet(string, interface{}) error } // New creates a new json pointer for the given string @@ -83,7 +81,9 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := 
strings.Split(jsonPointerString, pointerSeparator) - p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } } } @@ -91,58 +91,38 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document any) (any, reflect.Kind, error) { +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document any, value any) (any, error) { +func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document any, decodedToken string, value any) (any, error) { +func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func isNil(input any) bool { - if input == nil { - return true - } - - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false - } -} - -func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { +func getSingleImpl(node interface{}, decodedToken string, 
nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() - if isNil(node) { - return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) - } - switch typed := node.(type) { - case JSONPointable: - r, err := typed.JSONLookup(decodedToken) + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil - case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect - return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { //nolint:exhaustive + switch kind { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -179,7 +159,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide } -func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := node.(JSONSetable); ok { // pointer impl @@ -190,7 +170,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { //nolint:exhaustive + switch rValue.Kind() { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -230,7 +210,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP } -func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -251,7 +231,8 @@ func (p *Pointer) get(node any, nameProvider 
*swag.NameProvider) (any, reflect.K if err != nil { return nil, knd, err } - node = r + node, kind = r, knd + } rValue := reflect.ValueOf(node) @@ -260,11 +241,11 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K return node, kind, nil } -func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return errors.New("only structs, pointers, maps and slices are supported for setting values") + return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { @@ -303,7 +284,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { continue } - switch kind { //nolint:exhaustive + switch kind { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -382,128 +363,6 @@ func (p *Pointer) String() string { return pointerString } -func (p *Pointer) Offset(document string) (int64, error) { - dec := json.NewDecoder(strings.NewReader(document)) - var offset int64 - for _, ttk := range p.DecodedTokens() { - tk, err := dec.Token() - if err != nil { - return 0, err - } - switch tk := tk.(type) { - case json.Delim: - switch tk { - case '{': - offset, err = offsetSingleObject(dec, ttk) - if err != nil { - return 0, err - } - case '[': - offset, err = offsetSingleArray(dec, ttk) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - } - return offset, nil -} - -func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { - for dec.More() { - offset := dec.InputOffset() - tk, err := dec.Token() - if err != nil { - return 0, err - } - 
switch tk := tk.(type) { - case json.Delim: - switch tk { - case '{': - if err = drainSingle(dec); err != nil { - return 0, err - } - case '[': - if err = drainSingle(dec); err != nil { - return 0, err - } - } - case string: - if tk == decodedToken { - return offset, nil - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - } - return 0, fmt.Errorf("token reference %q not found", decodedToken) -} - -func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { - idx, err := strconv.Atoi(decodedToken) - if err != nil { - return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) - } - var i int - for i = 0; i < idx && dec.More(); i++ { - tk, err := dec.Token() - if err != nil { - return 0, err - } - - if delim, isDelim := tk.(json.Delim); isDelim { - switch delim { - case '{': - if err = drainSingle(dec); err != nil { - return 0, err - } - case '[': - if err = drainSingle(dec); err != nil { - return 0, err - } - } - } - } - - if !dec.More() { - return 0, fmt.Errorf("token reference %q not found", decodedToken) - } - return dec.InputOffset(), nil -} - -// drainSingle drains a single level of object or array. -// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
-func drainSingle(dec *json.Decoder) error { - for dec.More() { - tk, err := dec.Token() - if err != nil { - return err - } - if delim, isDelim := tk.(json.Delim); isDelim { - switch delim { - case '{': - if err = drainSingle(dec); err != nil { - return err - } - case '[': - if err = drainSingle(dec); err != nil { - return err - } - } - } - } - - // Consumes the ending delim - if _, err := dec.Token(); err != nil { - return err - } - return nil -} - // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / @@ -518,14 +377,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) - step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) - step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) return step2 } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 22f8d21c..013fc194 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,61 +1,50 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: - min-complexity: 45 + min-complexity: 30 maligned: suggest-new: true dupl: - threshold: 200 + threshold: 100 goconst: min-len: 2 - min-occurrences: 3 - + min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: - maligned - - unparam - lll - - gochecknoinits - gochecknoglobals - - funlen - 
godox - gocognit - whitespace - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint - wrapcheck - - testpackage - - nlreturn - - gomnd - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt - goerr113 - - errorlint + - gomnd + - tparallel - nestif - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint + - errorlint - varcheck - - structcheck + - interfacer + - deadcode - golint + - ifshort + - structcheck - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index c7fc2049..b94753aa 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,19 +1,15 @@ -# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) +# gojsonreference [![Build 
Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. Stable API ## Dependencies -* https://github.com/go-openapi/jsonpointer +https://github.com/go-openapi/jsonpointer ## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore index f47cb204..dd91ed6a 100644 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -1 +1,2 @@ -*.out +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index 22f8d21c..835d55e7 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -11,7 +11,7 @@ linters-settings: threshold: 200 goconst: min-len: 2 - min-occurrences: 3 + min-occurrences: 2 linters: enable-all: true @@ -40,22 +40,3 @@ linters: - tparallel - thelper - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - 
- deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 7fd2810c..18782c6d 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -1,5 +1,8 @@ -# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) +# OAI object model +[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) + +[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) @@ -29,26 +32,3 @@ The object model for OpenAPI specification documents. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. > > An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 - -* Does the unmarshaling support YAML? - -> Not directly. The exposed types know only how to unmarshal from JSON. -> -> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by -> github.com/go-openapi/loads -> -> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec -> -> See also https://github.com/go-openapi/spec/issues/164 - -* How can I validate a spec? 
- -> Validation is provided by [the validate package](http://github.com/go-openapi/validate) - -* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? - -> We found jsonschema compatibility more important: since `id` in jsonschema influences -> how `$ref` are resolved. -> This `id` does not conflict with any property named `id`. -> -> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml new file mode 100644 index 00000000..09035939 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/appveyor.yml @@ -0,0 +1,32 @@ +version: "0.1.{build}" + +clone_folder: C:\go-openapi\spec +shallow_clone: true # for startup speed +pull_requests: + do_not_increment_build_number: true + +#skip_tags: true +#skip_branch_with_pr: true + +# appveyor.yml +build: off + +environment: + GOPATH: c:\gopath + +stack: go 1.15 + +test_script: + - go test -v -timeout 20m ./... + +deploy: off + +notifications: + - provider: Slack + incoming_webhook: https://hooks.slack.com/services/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 + auth_token: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= + channel: bots + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git 
a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 00000000..afc83850 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,297 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.248kB) + +package spec + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04Json = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17
\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") + +func jsonschemaDraft04JsonBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04Json, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04Json() (*asset, error) { + bytes, err := jsonschemaDraft04JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} + return a, nil +} + +var _v2SchemaJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21
\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9
\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2d\x8b\x3c\xc6\x82\x62
\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xee\x5e\x20\x22\xf2\xc8
\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x81\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9c\x85\x76\x1d\xae\x37
\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\xa8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x21\x6e\x19\x0f\xc7\xa4
\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\xa9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xbc\x0e\x86\x3c\x10\xb4
\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00") + +func v2SchemaJsonBytes() ([]byte, error) { + return bindataRead( + _v2SchemaJson, + "v2/schema.json", + ) +} + +func v2SchemaJson() (*asset, error) { + bytes, err := v2SchemaJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. 
+func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04Json, + + "v2/schema.json": v2SchemaJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, + "v2": {nil, map[string]*bintree{ + "schema.json": {v2SchemaJson, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. 
+func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go deleted file mode 100644 index 1f428475..00000000 --- a/vendor/github.com/go-openapi/spec/embed.go +++ /dev/null @@ -1,17 +0,0 @@ -package spec - -import ( - "embed" - "path" -) - -//go:embed schemas/*.json schemas/*/*.json -var assets embed.FS - -func jsonschemaDraft04JSONBytes() ([]byte, error) { - return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) -} - -func v2SchemaJSONBytes() ([]byte, error) { - return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) -} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index b81a5699..d4ea889d 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -27,6 +27,7 @@ import ( // all relative $ref's will be resolved from there. // // PathLoader injects a document loading method. 
By default, this resolves to the function provided by the SpecLoader package variable. +// type ExpandOptions struct { RelativeBase string // the path to the root document to expand. This is a file, not a directory SkipSchemas bool // do not expand schemas, just paths, parameters and responses @@ -57,7 +58,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { if !options.SkipSchemas { for key, definition := range spec.Definitions { parentRefs := make([]string, 0, 10) - parentRefs = append(parentRefs, "#/definitions/"+key) + parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) def, err := expandSchema(definition, parentRefs, resolver, specBasePath) if resolver.shouldStopOnError(err) { @@ -102,21 +103,15 @@ const rootBase = ".root" // baseForRoot loads in the cache the root document and produces a fake ".root" base path entry // for further $ref resolution +// +// Setting the cache is optional and this parameter may safely be left to nil. func baseForRoot(root interface{}, cache ResolutionCache) string { - // cache the root document to resolve $ref's - normalizedBase := normalizeBase(rootBase) - if root == nil { - // ensure that we never leave a nil root: always cache the root base pseudo-document - cachedRoot, found := cache.Get(normalizedBase) - if found && cachedRoot != nil { - // the cache is already preloaded with a root - return normalizedBase - } - - root = map[string]interface{}{} + return "" } + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) cache.Set(normalizedBase, root) return normalizedBase @@ -213,19 +208,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.Ref.String() != "" { - if !resolver.options.SkipSchemas { - return expandSchemaRef(target, parentRefs, resolver, basePath) - } - - // when "expand" with SkipSchema, we just rebase the existing $ref without replacing - // the full schema. 
- rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) - if err != nil { - return nil, err - } - target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - - return &target, nil + return expandSchemaRef(target, parentRefs, resolver, basePath) } for k := range target.Definitions { @@ -537,25 +520,21 @@ func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { } func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, sch, err := getRefAndSchema(input) + ref, _, err := getRefAndSchema(input) if err != nil { return err } - if ref == nil && sch == nil { // nothing to do + if ref == nil { return nil } parentRefs := make([]string, 0, 10) - if ref != nil { - // dereference this $ref - if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - ref, sch, _ = getRefAndSchema(input) + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err } + ref, sch, _ := getRefAndSchema(input) if ref.String() != "" { transitiveResolver := resolver.transitiveResolver(basePath, *ref) basePath = resolver.updateBasePath(transitiveResolver, basePath) @@ -567,7 +546,6 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa if ref != nil { *ref = Ref{} } - return nil } @@ -577,29 +555,38 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa return ern } - if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ + switch { + case resolver.isCircular(&rebasedRef, basePath, parentRefs...): // this is a circular $ref: stop expansion if !resolver.options.AbsoluteCircularRef { sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) } else { sch.Ref = rebasedRef } + case !resolver.options.SkipSchemas: + // schema expanded to a $ref in another root + sch.Ref = rebasedRef + debugLog("rebased to: %s", sch.Ref.String()) + default: + // skip schema expansion but rebase $ref to schema + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) } } - // $ref expansion or rebasing is performed by expandSchema below if ref != nil { *ref = Ref{} } // expand schema - // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - - if s != nil { // guard for when continuing on error + if !resolver.options.SkipSchemas { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + if s == nil { + // guard for when continuing on error + return nil + } *sch = *s } diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go index f19f1a8f..2df07231 100644 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) { return u, "" } -func fixWindowsURI(_ *url.URL, _ string) { +func fixWindowsURI(u *url.URL, in string) { } diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index a69cca88..995ce6ac 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -217,12 +217,9 @@ func (o 
*Operation) AddParam(param *Parameter) *Operation { for i, p := range o.Parameters { if p.Name == param.Name && p.In == param.In { - params := make([]Parameter, 0, len(o.Parameters)+1) - params = append(params, o.Parameters[:i]...) - params = append(params, *param) + params := append(o.Parameters[:i], *param) params = append(params, o.Parameters[i+1:]...) o.Parameters = params - return o } } diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index bd4f1cdb..2b2b89b6 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -84,27 +84,27 @@ type ParamProps struct { // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// - Header - Custom headers that are expected as part of the request. -// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). 
This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. 
+// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index 0059b99a..b81175af 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -168,7 +168,14 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) - data, fromCache := r.cache.Get(normalized) + unescaped, err := url.PathUnescape(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + u := url.URL{Path: unescaped} + + data, fromCache := r.cache.Get(u.RequestURI()) if fromCache { return data, toFetch, fromCache, nil } diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json deleted file mode 100644 index bcbb8474..00000000 --- a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "$schema": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - 
"multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": 
"#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json deleted file mode 100644 index ebe10ed3..00000000 --- a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json +++ /dev/null @@ -1,1607 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." 
- }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." 
- }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." - }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
- }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for responses" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." 
- }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - "type": "object", - "required": [ - "description" - ], - 
"properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": 
"object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 
1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - 
"$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": 
"#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": 
{ - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - 
"uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - } - } - } - } -} diff --git a/vendor/github.com/go-openapi/spec/spec.go 
b/vendor/github.com/go-openapi/spec/spec.go index 876aa127..7d38b6e6 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -26,7 +26,7 @@ import ( const ( // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema + // JSONSchemaURL the url for the json schema schema JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) @@ -41,7 +41,7 @@ func MustLoadJSONSchemaDraft04() *Schema { // JSONSchemaDraft04 loads the json schema document for json shema draft04 func JSONSchemaDraft04() (*Schema, error) { - b, err := jsonschemaDraft04JSONBytes() + b, err := Asset("jsonschema-draft-04.json") if err != nil { return nil, err } @@ -65,7 +65,7 @@ func MustLoadSwagger20Schema() *Schema { // Swagger20Schema loads the swagger 2.0 schema from the embedded assets func Swagger20Schema() (*Schema, error) { - b, err := v2SchemaJSONBytes() + b, err := Asset("v2/schema.json") if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 1590fd17..44722ffd 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool - if len(data) > 0 { + if len(data) >= 4 { if data[0] == '{' { var sch Schema if err := json.Unmarshal(data, &sch); err != nil { @@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } nw.Schema = &sch } - nw.Allows = !bytes.Equal(data, []byte("false")) + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') } *s = nw return nil diff --git 
a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go new file mode 100644 index 00000000..60b78515 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go18.go @@ -0,0 +1,8 @@ +//go:build !go1.19 +// +build !go1.19 + +package spec + +import "net/url" + +var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go index 5bdfe40b..392e3e63 100644 --- a/vendor/github.com/go-openapi/spec/url_go19.go +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -1,3 +1,6 @@ +//go:build go1.19 +// +build go1.19 + package spec import "net/url" diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index c4b1b64f..d69b53ac 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,4 +2,3 @@ secrets.yml vendor Godeps .idea -*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 80e2be00..bf503e40 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 45 + min-complexity: 25 maligned: suggest-new: true dupl: - threshold: 200 + threshold: 100 goconst: min-len: 3 - min-occurrences: 3 + min-occurrences: 2 linters: enable-all: true @@ -20,41 +20,35 @@ linters: - lll - gochecknoinits - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - nlreturn + - testpackage + - wrapcheck - gomnd + - exhaustive - exhaustivestruct - goerr113 - - errorlint - - nestif - - godot + - wsl + - whitespace - gofumpt + - godot + - nestif + - godox + - funlen + - gci + - gocognit - paralleltest - - tparallel - thelper - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - 
nonamedreturns - - musttag - - ireturn - - forcetypeassert + - gomoddirectives - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv - golint + - exhaustruct + - nilnil + - nonamedreturns - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md deleted file mode 100644 index e7f28ed6..00000000 --- a/vendor/github.com/go-openapi/swag/BENCHMARK.md +++ /dev/null @@ -1,52 +0,0 @@ -# Benchmarks - -## Name mangling utilities - -```bash -go test -bench XXX -run XXX -benchtime 30s -``` - -### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df - -``` -goos: linux -goarch: amd64 -pkg: github.com/go-openapi/swag -cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz -BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op -BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op -BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op -BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op -BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op -BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op -``` - -### Benchmarks after PR #79 - -~ x10 performance improvement and ~ /100 memory allocations. 
- -``` -goos: linux -goarch: amd64 -pkg: github.com/go-openapi/swag -cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz -BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op -BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op -BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op -BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op -BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op -BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op -``` - -``` -goos: linux -goarch: amd64 -pkg: github.com/go-openapi/swag -cpu: AMD Ryzen 7 5800X 8-Core Processor -BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op -BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op -BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op -BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op -BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op -BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op -``` diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index a7292229..217f6fa5 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,8 +1,7 @@ -# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack 
Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) +[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -19,5 +18,4 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* YAML utilities depend on `gopkg.in/yaml.v3` -* `github.com/mailru/easyjson v0.7.7` +* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go deleted file mode 100644 index 20a359bb..00000000 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "sort" - "strings" - "sync" -) - -var ( - // commonInitialisms are common acronyms that are kept as whole uppercased words. - commonInitialisms *indexOfInitialisms - - // initialisms is a slice of sorted initialisms - initialisms []string - - // a copy of initialisms pre-baked as []rune - initialismsRunes [][]rune - initialismsUpperCased [][]rune - - isInitialism func(string) bool - - maxAllocMatches int -) - -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - configuredInitialisms := map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, - } - - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - initialismsRunes = asRunes(initialisms) - initialismsUpperCased = asUpperCased(initialisms) - maxAllocMatches = maxAllocHeuristic(initialismsRunes) - - // a test function - isInitialism = commonInitialisms.isInitialism -} - -func asRunes(in []string) [][]rune { - out := make([][]rune, len(in)) - for i, initialism := range in { - out[i] = []rune(initialism) - } - - return out -} - -func asUpperCased(in []string) [][]rune { - out := make([][]rune, len(in)) - - for i, initialism := range in { - out[i] = []rune(upper(trim(initialism))) - } - - return out -} - -func maxAllocHeuristic(in 
[][]rune) int { - heuristic := make(map[rune]int) - for _, initialism := range in { - heuristic[initialism[0]]++ - } - - var maxAlloc int - for _, val := range heuristic { - if val > maxAlloc { - maxAlloc = val - } - } - - return maxAlloc -} - -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() - initialismsRunes = asRunes(initialisms) - initialismsUpperCased = asUpperCased(initialisms) -} - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, _ interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} - -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} diff 
--git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 783442fd..00038c37 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,7 +21,6 @@ import ( "net/http" "net/url" "os" - "path" "path/filepath" "runtime" "strings" @@ -41,97 +40,43 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(pth string) ([]byte, error) { - return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) +func LoadFromFileOrHTTP(path string) ([]byte, error) { + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) +func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } -// LoadStrategy returns a loader function for a given path or URI. -// -// The load strategy returns the remote load for any path starting with `http`. -// So this works for any URI with a scheme `http` or `https`. -// -// The fallback strategy is to call the local loader. -// -// The local loader takes a local file system path (absolute or relative) as argument, -// or alternatively a `file://...` URI, **without host** (see also below for windows). -// -// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, -// especially on windows. 
-// -// Before the local loader is called, the given path is transformed: -// - percent-encoded characters are unescaped -// - simple paths (e.g. `./folder/file`) are passed as-is -// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. -// -// For paths provided as URIs with the "file" scheme, please note that: -// - `file://` is simply stripped. -// This means that the host part of the URI is not parsed at all. -// For example, `file:///folder/file" becomes "/folder/file`, -// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. -// Similarly, `file://./folder/file` yields `./folder/file`. -// - on windows, `file://...` can take a host so as to specify an UNC share location. -// -// Reminder about windows-specifics: -// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) -// - `file:///c:/folder/file` becomes `C:\folder\file` -// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` -func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(pth, "http") { +// LoadStrategy returns a loader function for a given path or uri +func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(path, "http") { return remote } - - return func(p string) ([]byte, error) { - upth, err := url.PathUnescape(p) + return func(pth string) ([]byte, error) { + upth, err := pathUnescape(pth) if err != nil { return nil, err } - if !strings.HasPrefix(p, `file://`) { - // regular file path provided: just normalize slashes - return local(filepath.FromSlash(upth)) - } - - if runtime.GOOS != "windows" { - // crude processing: this leaves full URIs with a host with a (mostly) unexpected result - upth = strings.TrimPrefix(upth, `file://`) - - return 
local(filepath.FromSlash(upth)) - } - - // windows-only pre-processing of file://... URIs - - // support for canonical file URIs on windows. - u, err := url.Parse(filepath.ToSlash(upth)) - if err != nil { - return nil, err - } - - if u.Host != "" { - // assume UNC name (volume share) - // NOTE: UNC port not yet supported - - // when the "host" segment is a drive letter: - // file://C:/folder/... => C:\folder - upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) - if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { - // tolerance: if we have a leading dot, this can't be a host - // file://host/share/folder\... ==> \\host\share\path\folder - upth = "//" + upth - } - } else { - // no host, let's figure out if this is a drive letter - upth = strings.TrimPrefix(upth, `file://`) - first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") - if strings.HasSuffix(first, ":") { - // drive letter in the first segment: - // file:///c:/folder/... ==> strip the leading slash - upth = strings.TrimPrefix(upth, `/`) + if strings.HasPrefix(pth, `file://`) { + if runtime.GOOS == "windows" { + // support for canonical file URIs on windows. + // Zero tolerance here for dodgy URIs. + u, _ := url.Parse(upth) + if u.Host != "" { + // assume UNC name (volume share) + // file://host/share/folder\... ==> \\host\share\path\folder + // NOTE: UNC port not yet supported + upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) + } else { + // file:///c:/folder/... 
==> just remove the leading slash + upth = strings.TrimPrefix(upth, `file:///`) + } + } else { + upth = strings.TrimPrefix(upth, `file://`) } } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index 8bb64ac3..aa7f6a9b 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,80 +14,74 @@ package swag -import ( - "unicode" - "unicode/utf8" -) +import "unicode" type ( - lexemKind uint8 + nameLexem interface { + GetUnsafeGoName() string + GetOriginal() string + IsInitialism() bool + } - nameLexem struct { + initialismNameLexem struct { original string matchedInitialism string - kind lexemKind + } + + casualNameLexem struct { + original string } ) -const ( - lexemKindCasualName lexemKind = iota - lexemKindInitialismName -) - -func newInitialismNameLexem(original, matchedInitialism string) nameLexem { - return nameLexem{ - kind: lexemKindInitialismName, +func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { + return &initialismNameLexem{ original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) nameLexem { - return nameLexem{ - kind: lexemKindCasualName, +func newCasualNameLexem(original string) *casualNameLexem { + return &casualNameLexem{ original: original, } } -func (l nameLexem) GetUnsafeGoName() string { - if l.kind == lexemKindInitialismName { - return l.matchedInitialism - } - - var ( - first rune - rest string - ) +func (l *initialismNameLexem) GetUnsafeGoName() string { + return l.matchedInitialism +} +func (l *casualNameLexem) GetUnsafeGoName() string { + var first rune + var rest string for i, orig := range l.original { if i == 0 { first = orig continue } - if i > 0 { rest = l.original[i:] break } } - if len(l.original) > 1 { - b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) - defer func() { - poolOfBuffers.RedeemBuffer(b) - }() - 
b.WriteRune(unicode.ToUpper(first)) - b.WriteString(lower(rest)) - return b.String() + return string(unicode.ToUpper(first)) + lower(rest) } return l.original } -func (l nameLexem) GetOriginal() string { +func (l *initialismNameLexem) GetOriginal() string { return l.original } -func (l nameLexem) IsInitialism() bool { - return l.kind == lexemKindInitialismName +func (l *casualNameLexem) GetOriginal() string { + return l.original +} + +func (l *initialismNameLexem) IsInitialism() bool { + return true +} + +func (l *casualNameLexem) IsInitialism() bool { + return false } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go new file mode 100644 index 00000000..f5228b82 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.8 +// +build go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.PathUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go new file mode 100644 index 00000000..7c7da9c0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.9 +// +build go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. 
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, value interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go new file mode 100644 index 00000000..2757d9b9 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !go1.8 +// +build !go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go new file mode 100644 index 00000000..0565db37 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -0,0 +1,70 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.9 +// +build !go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Before go1.9, this may be implemented with a mutex on the map. 
+type indexOfInitialisms struct { + getMutex *sync.Mutex + index map[string]bool +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + getMutex: new(sync.Mutex), + index: make(map[string]bool, 50), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k, v := range initial { + m.index[k] = v + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + m.getMutex.Lock() + defer m.getMutex.Unlock() + _, ok := m.index[key] + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + m.index[key] = true + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byInitialism(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index 274727a8..a1825fb7 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,269 +15,124 @@ package swag import ( - "bytes" - "sync" "unicode" - "unicode/utf8" ) +var nameReplaceTable = map[rune]string{ + '@': "At ", + '&': "And ", + '|': "Pipe ", + '$': "Dollar ", + '!': "Bang ", + '-': "", + '_': "", +} + type ( splitter struct { - initialisms []string - initialismsRunes [][]rune - initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version postSplitInitialismCheck bool + initialisms []string } - splitterOption func(*splitter) - - initialismMatch struct { - body []rune - start, end int - complete bool - } - initialismMatches []initialismMatch + splitterOption func(*splitter) *splitter ) -type ( - // memory pools of temporary objects. 
- // - // These are used to recycle temporarily allocated objects - // and relieve the GC from undue pressure. - - matchesPool struct { - *sync.Pool - } - - buffersPool struct { - *sync.Pool - } - - lexemsPool struct { - *sync.Pool - } - - splittersPool struct { - *sync.Pool - } -) - -var ( - // poolOfMatches holds temporary slices for recycling during the initialism match process - poolOfMatches = matchesPool{ - Pool: &sync.Pool{ - New: func() any { - s := make(initialismMatches, 0, maxAllocMatches) - - return &s - }, - }, - } - - poolOfBuffers = buffersPool{ - Pool: &sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } - - poolOfLexems = lexemsPool{ - Pool: &sync.Pool{ - New: func() any { - s := make([]nameLexem, 0, maxAllocMatches) - - return &s - }, - }, - } - - poolOfSplitters = splittersPool{ - Pool: &sync.Pool{ - New: func() any { - s := newSplitter() - - return &s - }, - }, - } -) - -// nameReplaceTable finds a word representation for special characters. -func nameReplaceTable(r rune) (string, bool) { - switch r { - case '@': - return "At ", true - case '&': - return "And ", true - case '|': - return "Pipe ", true - case '$': - return "Dollar ", true - case '!': - return "Bang ", true - case '-': - return "", true - case '_': - return "", true - default: - return "", false - } -} - -// split calls the splitter. 
-// -// Use newSplitter for more control and options +// split calls the splitter; splitter provides more control and post options func split(str string) []string { - s := poolOfSplitters.BorrowSplitter() - lexems := s.split(str) - result := make([]string, 0, len(*lexems)) + lexems := newSplitter().split(str) + result := make([]string, 0, len(lexems)) - for _, lexem := range *lexems { + for _, lexem := range lexems { result = append(result, lexem.GetOriginal()) } - poolOfLexems.RedeemLexems(lexems) - poolOfSplitters.RedeemSplitter(s) return result } -func newSplitter(options ...splitterOption) splitter { - s := splitter{ +func (s *splitter) split(str string) []nameLexem { + return s.toNameLexems(str) +} + +func newSplitter(options ...splitterOption) *splitter { + splitter := &splitter{ postSplitInitialismCheck: false, initialisms: initialisms, - initialismsRunes: initialismsRunes, - initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - option(&s) + splitter = option(splitter) } - return s + return splitter } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) { +func withPostSplitInitialismCheck(s *splitter) *splitter { s.postSplitInitialismCheck = true -} - -func (p matchesPool) BorrowMatches() *initialismMatches { - s := p.Get().(*initialismMatches) - *s = (*s)[:0] // reset slice, keep allocated capacity - return s } -func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { - s := p.Get().(*bytes.Buffer) - s.Reset() - - if s.Cap() < size { - s.Grow(size) +type ( + initialismMatch struct { + start, end int + body []rune + complete bool } + initialismMatches []*initialismMatch +) - return s -} - -func (p lexemsPool) BorrowLexems() *[]nameLexem { - s := p.Get().(*[]nameLexem) - *s = (*s)[:0] // reset slice, keep allocated capacity - - return s -} - -func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { - s := p.Get().(*splitter) - 
s.postSplitInitialismCheck = false // reset options - for _, apply := range options { - apply(s) - } - - return s -} - -func (p matchesPool) RedeemMatches(s *initialismMatches) { - p.Put(s) -} - -func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { - p.Put(s) -} - -func (p lexemsPool) RedeemLexems(s *[]nameLexem) { - p.Put(s) -} - -func (p splittersPool) RedeemSplitter(s *splitter) { - p.Put(s) -} - -func (m initialismMatch) isZero() bool { - return m.start == 0 && m.end == 0 -} - -func (s splitter) split(name string) *[]nameLexem { +func (s *splitter) toNameLexems(name string) []nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) - if matches == nil { - return poolOfLexems.BorrowLexems() - } - return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { - var matches *initialismMatches +func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { + matches := make(initialismMatches, 0) for currentRunePosition, currentRune := range nameRunes { - // recycle these allocations as we loop over runes - // with such recycling, only 2 slices should be allocated per call - // instead of o(n). 
- newMatches := poolOfMatches.BorrowMatches() + newMatches := make(initialismMatches, 0, len(matches)) // check current initialism matches - if matches != nil { // skip first iteration - for _, match := range *matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - *newMatches = append(*newMatches, match) - continue - } - - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if currentMatchRune != currentRune { - continue - } - - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue - } - } - - match.complete = true - match.end = currentRunePosition - } - - *newMatches = append(*newMatches, match) + for _, match := range matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + newMatches = append(newMatches, match) + continue } + + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if !s.initialismRuneEqual(currentMatchRune, currentRune) { + continue + } + + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + newMatches = append(newMatches, match) } // check for new initialism matches - for i 
:= range s.initialisms { - initialismRunes := s.initialismsRunes[i] - if initialismRunes[0] == currentRune { - *newMatches = append(*newMatches, initialismMatch{ + for _, initialism := range s.initialisms { + initialismRunes := []rune(initialism) + if s.initialismRuneEqual(initialismRunes[0], currentRune) { + newMatches = append(newMatches, &initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -285,28 +140,24 @@ func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { } } - if matches != nil { - poolOfMatches.RedeemMatches(matches) - } matches = newMatches } - // up to the caller to redeem this last slice return matches } -func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { - nameLexems := poolOfLexems.BorrowLexems() +func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { + nameLexems := make([]nameLexem, 0) - var lastAcceptedMatch initialismMatch - for _, match := range *matches { + var lastAcceptedMatch *initialismMatch + for _, match := range matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch.isZero(); firstMatch { - s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) - *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch == nil; firstMatch { + nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) + nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -318,66 +169,63 @@ func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - s.appendBrokenDownCasualString(nameLexems, middle) - *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) + nameLexems = append(nameLexems, s.breakCasualString(middle)...) 
+ nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch.isZero() { - *nameLexems = (*nameLexems)[:0] - s.appendBrokenDownCasualString(nameLexems, nameRunes) - } else if lastAcceptedMatch.end+1 != len(nameRunes) { - rest := nameRunes[lastAcceptedMatch.end+1:] - s.appendBrokenDownCasualString(nameLexems, rest) + if lastAcceptedMatch == nil { + return s.breakCasualString(nameRunes) } - poolOfMatches.RedeemMatches(matches) + if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + nameLexems = append(nameLexems, s.breakCasualString(rest)...) + } return nameLexems } -func (s splitter) breakInitialism(original string) nameLexem { +func (s *splitter) initialismRuneEqual(a, b rune) bool { + return a == b +} + +func (s *splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { - currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused - defer func() { - poolOfBuffers.RedeemBuffer(currentSegment) - }() +func (s *splitter) breakCasualString(str []rune) []nameLexem { + segments := make([]nameLexem, 0) + currentSegment := "" addCasualNameLexem := func(original string) { - *segments = append(*segments, newCasualNameLexem(original)) + segments = append(segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - *segments = append(*segments, newInitialismNameLexem(original, match)) + segments = append(segments, newInitialismNameLexem(original, match)) } - var addNameLexem func(string) - if s.postSplitInitialismCheck { - addNameLexem = func(original string) { - for i := range s.initialisms { - if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { - addInitialismNameLexem(original, 
s.initialisms[i]) - + addNameLexem := func(original string) { + if s.postSplitInitialismCheck { + for _, initialism := range s.initialisms { + if upper(initialism) == upper(original) { + addInitialismNameLexem(original, initialism) return } } - - addCasualNameLexem(original) } - } else { - addNameLexem = addCasualNameLexem + + addCasualNameLexem(original) } - for _, rn := range str { - if replace, found := nameReplaceTable(rn); found { - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) - currentSegment.Reset() + for _, rn := range string(str) { + if replace, found := nameReplaceTable[rn]; found { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" } if replace != "" { @@ -388,121 +236,27 @@ func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) - currentSegment.Reset() + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" } continue } if unicode.IsUpper(rn) { - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) + if currentSegment != "" { + addNameLexem(currentSegment) } - currentSegment.Reset() + currentSegment = "" } - currentSegment.WriteRune(rn) + currentSegment += string(rn) } - if currentSegment.Len() > 0 { - addNameLexem(currentSegment.String()) + if currentSegment != "" { + addNameLexem(currentSegment) } -} - -// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but -// it ignores leading and trailing blank spaces in the compared -// string. -// -// base is assumed to be composed of upper-cased runes, and be already -// trimmed. -// -// This code is heavily inspired from strings.EqualFold. 
-func isEqualFoldIgnoreSpace(base []rune, str string) bool { - var i, baseIndex int - // equivalent to b := []byte(str), but without data copy - b := hackStringBytes(str) - - for i < len(b) { - if c := b[i]; c < utf8.RuneSelf { - // fast path for ASCII - if c != ' ' && c != '\t' { - break - } - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if !unicode.IsSpace(r) { - break - } - i += size - } - - if i >= len(b) { - return len(base) == 0 - } - - for _, baseRune := range base { - if i >= len(b) { - break - } - - if c := b[i]; c < utf8.RuneSelf { - // single byte rune case (ASCII) - if baseRune >= utf8.RuneSelf { - return false - } - - baseChar := byte(baseRune) - if c != baseChar && - !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { - return false - } - - baseIndex++ - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if unicode.ToUpper(r) != baseRune { - return false - } - baseIndex++ - i += size - } - - if baseIndex != len(base) { - return false - } - - // all passed: now we should only have blanks - for i < len(b) { - if c := b[i]; c < utf8.RuneSelf { - // fast path for ASCII - if c != ' ' && c != '\t' { - return false - } - i++ - - continue - } - - // unicode case - r, size := utf8.DecodeRune(b[i:]) - if !unicode.IsSpace(r) { - return false - } - - i += size - } - - return true + + return segments } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go deleted file mode 100644 index 90745d5c..00000000 --- a/vendor/github.com/go-openapi/swag/string_bytes.go +++ /dev/null @@ -1,8 +0,0 @@ -package swag - -import "unsafe" - -// hackStringBytes returns the (unsafe) underlying bytes slice of a string. 
-func hackStringBytes(str string) []byte { - return unsafe.Slice(unsafe.StringData(str), len(str)) -} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 5051401c..f78ab684 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,25 +18,76 @@ import ( "reflect" "strings" "unicode" - "unicode/utf8" ) +// commonInitialisms are common acronyms that are kept as whole uppercased words. +var commonInitialisms *indexOfInitialisms + +// initialisms is a slice of sorted initialisms +var initialisms []string + +var isInitialism func(string) bool + // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // -// The prefix function is assumed to return a string that starts with an upper case letter. -// // e.g. to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func prefixFunc(name, in string) string { - if GoNamePrefixFunc == nil { - return "X" + in +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + var configuredInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, } - return GoNamePrefixFunc(name) + in + // a thread-safe index of initialisms + commonInitialisms = 
newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + + // a test function + isInitialism = commonInitialisms.isInitialism } const ( @@ -105,9 +156,25 @@ func SplitByFormat(data, format string) []string { return result } +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} + // Removes leading whitespaces func trim(str string) string { - return strings.TrimSpace(str) + return strings.Trim(str, " ") } // Shortcut to strings.ToUpper() @@ -121,20 +188,15 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) string { - camelized := poolOfBuffers.BorrowBuffer(len(word)) - defer func() { - poolOfBuffers.RedeemBuffer(camelized) - }() - +func Camelize(word string) (camelized string) { for pos, ru := range []rune(word) { if pos > 0 { - camelized.WriteRune(unicode.ToLower(ru)) + camelized += string(unicode.ToLower(ru)) } else { - camelized.WriteRune(unicode.ToUpper(ru)) + camelized += string(unicode.ToUpper(ru)) } } - return camelized.String() + return } // ToFileName lowercases and underscores a go type name @@ -162,40 +224,33 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - in := s.split(name) - poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(*in)) + in := newSplitter(withPostSplitInitialismCheck).split(name) + out := make([]string, 0, len(in)) - for _, w := range *in { + for _, w := range in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, trim(w.GetOriginal())) + out = append(out, w.GetOriginal()) 
} } - poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - in := s.split(name) - poolOfSplitters.RedeemSplitter(s) + in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(*in)) - for _, w := range *in { - original := trim(w.GetOriginal()) + out := make([]string, 0, len(in)) + for _, w := range in { + original := w.GetOriginal() if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } - poolOfLexems.RedeemLexems(in) - return strings.Join(out, " ") } @@ -209,7 +264,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(trim(w))) + out = append(out, Camelize(w)) } return strings.Join(out, "") } @@ -228,70 +283,35 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) - lexems := s.split(name) - poolOfSplitters.RedeemSplitter(s) - defer func() { - poolOfLexems.RedeemLexems(lexems) - }() - lexemes := *lexems + lexems := newSplitter(withPostSplitInitialismCheck).split(name) - if len(lexemes) == 0 { - return "" - } - - result := poolOfBuffers.BorrowBuffer(len(name)) - defer func() { - poolOfBuffers.RedeemBuffer(result) - }() - - // check if not starting with a letter, upper case - firstPart := lexemes[0].GetUnsafeGoName() - if lexemes[0].IsInitialism() { - firstPart = upper(firstPart) - } - - if c := firstPart[0]; c < utf8.RuneSelf { - // ASCII - switch { - case 'A' <= c && c <= 'Z': - result.WriteString(firstPart) - case 'a' <= c && c <= 'z': - result.WriteByte(c - 'a' + 'A') - result.WriteString(firstPart[1:]) - default: 
- result.WriteString(prefixFunc(name, firstPart)) - // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: - // assume this is always the case - } - } else { - // unicode - firstRune, _ := utf8.DecodeRuneInString(firstPart) - switch { - case !unicode.IsLetter(firstRune): - result.WriteString(prefixFunc(name, firstPart)) - case !unicode.IsUpper(firstRune): - result.WriteString(prefixFunc(name, firstPart)) - /* - result.WriteRune(unicode.ToUpper(firstRune)) - result.WriteString(firstPart[offset:]) - */ - default: - result.WriteString(firstPart) - } - } - - for _, lexem := range lexemes[1:] { + result := "" + for _, lexem := range lexems { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result.WriteString(goName) + result += goName } - return result.String() + if len(result) > 0 { + // Only prefix with X when the first character isn't an ascii letter + first := []rune(result)[0] + if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { + if GoNamePrefixFunc == nil { + return "X" + result + } + result = GoNamePrefixFunc(name) + result + } + first = []rune(result)[0] + if unicode.IsLetter(first) && !unicode.IsUpper(first) { + result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) + } + } + + return result } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -321,22 +341,13 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. 
func IsZero(data interface{}) bool { - v := reflect.ValueOf(data) - // check for nil data - switch v.Kind() { //nolint:exhaustive - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - if v.IsNil() { - return true - } - } - // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } - // continue with slightly more complex reflection - switch v.Kind() { //nolint:exhaustive + v := reflect.ValueOf(data) + switch v.Kind() { case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -347,13 +358,24 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true - default: - return false } + return false +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() } // CommandLineOptionsGroup represents a group of user-defined command line options diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f59e0259..f09ee609 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,11 +16,8 @@ package swag import ( "encoding/json" - "errors" "fmt" "path/filepath" - "reflect" - "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -51,7 +48,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, errors.New("only YAML documents that are objects are supported") + return nil, 
fmt.Errorf("only YAML documents that are objects are supported") } return &document, nil } @@ -150,7 +147,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil //nolint:nilnil + return nil, nil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -248,27 +245,7 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } -func isNil(input interface{}) bool { - if input == nil { - return true - } - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false - } -} - func json2yaml(item interface{}) (*yaml.Node, error) { - if isNil(item) { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Value: "null", - }, nil - } - switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -288,14 +265,7 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - keys := make([]string, 0, len(val)) - for k := range val { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := val[k] + for k, v := range val { childNode, err := json2yaml(v) if err != nil { return nil, err @@ -348,9 +318,8 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil - default: - return nil, fmt.Errorf("unhandled type: %T", val) } + return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 4021b96c..ec346e20 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -20,7 +20,12 @@ Andrew Reid Animesh Ray Arne Hormann Ariel Mashraki +Artur Melanchyk Asta 
Xie +B Lamarche +Bes Dollma +Bogdan Constantinescu +Brad Higgins Brian Hendriks Bulat Gaifullin Caine Jette @@ -33,6 +38,8 @@ Daniel Montoya Daniel Nichter Daniël van Eeden Dave Protasowski +Diego Dupin +Dirkjan Bussink DisposaBoy Egor Smolyakov Erwan Martin @@ -50,6 +57,7 @@ ICHINOSE Shogo Ilia Cimpoes INADA Naoki Jacek Szwec +Jakub Adamus James Harr Janek Vedock Jason Ng @@ -60,6 +68,7 @@ Jennifer Purevsuren Jerome Meyer Jiajia Zhong Jian Zhen +Joe Mann Joshua Prunier Julien Lefevre Julien Schmidt @@ -80,6 +89,7 @@ Lunny Xiao Luke Scott Maciej Zimnoch Michael Woolnough +Nao Yokotsuka Nathanial Murphy Nicola Peduzzi Oliver Bone @@ -89,6 +99,7 @@ Paul Bonser Paulius Lozys Peter Schultz Phil Porada +Minh Quang Rebecca Chin Reed Allman Richard Wilkes @@ -124,6 +135,7 @@ Ziheng Lyu Barracuda Networks, Inc. Counting Ltd. +Defined Networking Inc. DigitalOcean Inc. Dolthub Inc. dyves labs AG @@ -139,4 +151,5 @@ PingCAP Inc. Pivotal Inc. Shattered Silicon Ltd. Stripe Inc. +ThousandEyes Zendesk Inc. diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md index 0c9bd9b1..75674b60 100644 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -1,3 +1,55 @@ +# Changelog + +## v1.9.3 (2025-06-13) + +* `tx.Commit()` and `tx.Rollback()` returned `ErrInvalidConn` always. + Now they return cached real error if present. (#1690) + +* Optimize reading small resultsets to fix performance regression + introduced by compression protocol support. (#1707) + +* Fix `db.Ping()` on compressed connection. (#1723) + + +## v1.9.2 (2025-04-07) + +v1.9.2 is a re-release of v1.9.1 due to a release process issue; no changes were made to the content. + + +## v1.9.1 (2025-03-21) + +### Major Changes + +* Add Charset() option. 
(#1679) + +### Bugfixes + +* go.mod: fix go version format (#1682) +* Fix FormatDSN missing ConnectionAttributes (#1619) + +## v1.9.0 (2025-02-18) + +### Major Changes + +- Implement zlib compression. (#1487) +- Supported Go version is updated to Go 1.21+. (#1639) +- Add support for VECTOR type introduced in MySQL 9.0. (#1609) +- Config object can have custom dial function. (#1527) + +### Bugfixes + +- Fix auth errors when username/password are too long. (#1625) +- Check if MySQL supports CLIENT_CONNECT_ATTRS before sending client attributes. (#1640) +- Fix auth switch request handling. (#1666) + +### Other changes + +- Add "filename:line" prefix to log in go-mysql. Custom loggers now show it. (#1589) +- Improve error handling. It reduces the "busy buffer" errors. (#1595, #1601, #1641) +- Use `strconv.Atoi` to parse max_allowed_packet. (#1661) +- `rejectReadOnly` option now handles ER_READ_ONLY_MODE (1290) error too. (#1660) + + ## Version 1.8.1 (2024-03-26) Bugfixes: diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 4968cb06..da4593cc 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -38,11 +38,12 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support * Optional `time.Time` parsing * Optional placeholder interpolation + * Supports zlib compression. ## Requirements -* Go 1.19 or higher. We aim to support the 3 latest versions of Go. -* MySQL (5.7+) and MariaDB (10.3+) are supported. +* Go 1.21 or higher. We aim to support the 3 latest versions of Go. +* MySQL (5.7+) and MariaDB (10.5+) are supported. * [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP. * Do not ask questions about TiDB in our issue tracker or forum. 
* [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang) @@ -267,6 +268,16 @@ SELECT u.id FROM users as u will return `u.id` instead of just `id` if `columnsWithAlias=true`. +##### `compress` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +Toggles zlib compression. false by default. + ##### `interpolateParams` ``` @@ -519,6 +530,9 @@ This driver supports the [`ColumnType` interface](https://golang.org/pkg/databas Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. +> [!IMPORTANT] +> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver. + ### `LOAD DATA LOCAL INFILE` support For this feature you need direct access to the package. Therefore you must change the import path (no `_`): diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go deleted file mode 100644 index 1b7e19f3..00000000 --- a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go +++ /dev/null @@ -1,19 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. -// -// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
-//go:build go1.19 -// +build go1.19 - -package mysql - -import "sync/atomic" - -/****************************************************************************** -* Sync utils * -******************************************************************************/ - -type atomicBool = atomic.Bool diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go deleted file mode 100644 index 2e9a7f0b..00000000 --- a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go +++ /dev/null @@ -1,47 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. -// -// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. -//go:build !go1.19 -// +build !go1.19 - -package mysql - -import "sync/atomic" - -/****************************************************************************** -* Sync utils * -******************************************************************************/ - -// atomicBool is an implementation of atomic.Bool for older version of Go. -// it is a wrapper around uint32 for usage as a boolean value with -// atomic access. -type atomicBool struct { - _ noCopy - value uint32 -} - -// Load returns whether the current boolean value is true -func (ab *atomicBool) Load() bool { - return atomic.LoadUint32(&ab.value) > 0 -} - -// Store sets the value of the bool regardless of the previous value -func (ab *atomicBool) Store(value bool) { - if value { - atomic.StoreUint32(&ab.value, 1) - } else { - atomic.StoreUint32(&ab.value, 0) - } -} - -// Swap sets the value of the bool and returns the old value. 
-func (ab *atomicBool) Swap(value bool) bool { - if value { - return atomic.SwapUint32(&ab.value, 1) > 0 - } - return atomic.SwapUint32(&ab.value, 0) > 0 -} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go index 0774c5c8..f895e87b 100644 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -10,54 +10,47 @@ package mysql import ( "io" - "net" - "time" ) const defaultBufSize = 4096 const maxCachedBufSize = 256 * 1024 +// readerFunc is a function that compatible with io.Reader. +// We use this function type instead of io.Reader because we want to +// just pass mc.readWithTimeout. +type readerFunc func([]byte) (int, error) + // A buffer which is used for both reading and writing. // This is possible since communication on each connection is synchronous. // In other words, we can't write and read simultaneously on the same connection. // The buffer is similar to bufio.Reader / Writer but zero-copy-ish // Also highly optimized for this particular use case. -// This buffer is backed by two byte slices in a double-buffering scheme type buffer struct { - buf []byte // buf is a byte buffer who's length and capacity are equal. - nc net.Conn - idx int - length int - timeout time.Duration - dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer - flipcnt uint // flipccnt is the current buffer counter for double-buffering + buf []byte // read buffer. + cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize. } // newBuffer allocates and returns a new buffer. 
-func newBuffer(nc net.Conn) buffer { - fg := make([]byte, defaultBufSize) +func newBuffer() buffer { return buffer{ - buf: fg, - nc: nc, - dbuf: [2][]byte{fg, nil}, + cachedBuf: make([]byte, defaultBufSize), } } -// flip replaces the active buffer with the background buffer -// this is a delayed flip that simply increases the buffer counter; -// the actual flip will be performed the next time we call `buffer.fill` -func (b *buffer) flip() { - b.flipcnt += 1 +// busy returns true if the read buffer is not empty. +func (b *buffer) busy() bool { + return len(b.buf) > 0 } -// fill reads into the buffer until at least _need_ bytes are in it -func (b *buffer) fill(need int) error { - n := b.length - // fill data into its double-buffering target: if we've called - // flip on this buffer, we'll be copying to the background buffer, - // and then filling it with network data; otherwise we'll just move - // the contents of the current buffer to the front before filling it - dest := b.dbuf[b.flipcnt&1] +// len returns how many bytes in the read buffer. +func (b *buffer) len() int { + return len(b.buf) +} + +// fill reads into the read buffer until at least _need_ bytes are in it. +func (b *buffer) fill(need int, r readerFunc) error { + // we'll move the contents of the current buffer to dest before filling it. + dest := b.cachedBuf // grow buffer if necessary to fit the whole packet. if need > len(dest) { @@ -67,64 +60,41 @@ func (b *buffer) fill(need int) error { // if the allocated buffer is not too large, move it to backing storage // to prevent extra allocations on applications that perform large reads if len(dest) <= maxCachedBufSize { - b.dbuf[b.flipcnt&1] = dest + b.cachedBuf = dest } } - // if we're filling the fg buffer, move the existing data to the start of it. - // if we're filling the bg buffer, copy over the data - if n > 0 { - copy(dest[:n], b.buf[b.idx:]) - } - - b.buf = dest - b.idx = 0 + // move the existing data to the start of the buffer. 
+ n := len(b.buf) + copy(dest[:n], b.buf) for { - if b.timeout > 0 { - if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { - return err - } - } - - nn, err := b.nc.Read(b.buf[n:]) + nn, err := r(dest[n:]) n += nn - switch err { - case nil: - if n < need { - continue - } - b.length = n - return nil - - case io.EOF: - if n >= need { - b.length = n - return nil - } - return io.ErrUnexpectedEOF - - default: - return err + if err == nil && n < need { + continue } + + b.buf = dest[:n] + + if err == io.EOF { + if n < need { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } + return err } } // returns next N bytes from buffer. // The returned slice is only guaranteed to be valid until the next read -func (b *buffer) readNext(need int) ([]byte, error) { - if b.length < need { - // refill - if err := b.fill(need); err != nil { - return nil, err - } - } - - offset := b.idx - b.idx += need - b.length -= need - return b.buf[offset:b.idx], nil +func (b *buffer) readNext(need int) []byte { + data := b.buf[:need:need] + b.buf = b.buf[need:] + return data } // takeBuffer returns a buffer with the requested size. @@ -132,18 +102,18 @@ func (b *buffer) readNext(need int) ([]byte, error) { // Otherwise a bigger buffer is made. // Only one buffer (total) can be used at a time. func (b *buffer) takeBuffer(length int) ([]byte, error) { - if b.length > 0 { + if b.busy() { return nil, ErrBusyBuffer } // test (cheap) general case first - if length <= cap(b.buf) { - return b.buf[:length], nil + if length <= len(b.cachedBuf) { + return b.cachedBuf[:length], nil } - if length < maxPacketSize { - b.buf = make([]byte, length) - return b.buf, nil + if length < maxCachedBufSize { + b.cachedBuf = make([]byte, length) + return b.cachedBuf, nil } // buffer is larger than we want to store. @@ -154,10 +124,10 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) { // known to be smaller than defaultBufSize. // Only one buffer (total) can be used at a time. 
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { - if b.length > 0 { + if b.busy() { return nil, ErrBusyBuffer } - return b.buf[:length], nil + return b.cachedBuf[:length], nil } // takeCompleteBuffer returns the complete existing buffer. @@ -165,18 +135,15 @@ func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { // cap and len of the returned buffer will be equal. // Only one buffer (total) can be used at a time. func (b *buffer) takeCompleteBuffer() ([]byte, error) { - if b.length > 0 { + if b.busy() { return nil, ErrBusyBuffer } - return b.buf, nil + return b.cachedBuf, nil } // store stores buf, an updated buffer, if its suitable to do so. -func (b *buffer) store(buf []byte) error { - if b.length > 0 { - return ErrBusyBuffer - } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) { - b.buf = buf[:cap(buf)] +func (b *buffer) store(buf []byte) { + if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) { + b.cachedBuf = buf[:cap(buf)] } - return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go index 1cdf97b6..29b1aa43 100644 --- a/vendor/github.com/go-sql-driver/mysql/collations.go +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -8,7 +8,7 @@ package mysql -const defaultCollation = "utf8mb4_general_ci" +const defaultCollationID = 45 // utf8mb4_general_ci const binaryCollationID = 63 // A list of available collations mapped to the internal ID. diff --git a/vendor/github.com/go-sql-driver/mysql/compress.go b/vendor/github.com/go-sql-driver/mysql/compress.go new file mode 100644 index 00000000..38bfa000 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/compress.go @@ -0,0 +1,213 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2024 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "compress/zlib" + "fmt" + "io" + "sync" +) + +var ( + zrPool *sync.Pool // Do not use directly. Use zDecompress() instead. + zwPool *sync.Pool // Do not use directly. Use zCompress() instead. +) + +func init() { + zrPool = &sync.Pool{ + New: func() any { return nil }, + } + zwPool = &sync.Pool{ + New: func() any { + zw, err := zlib.NewWriterLevel(new(bytes.Buffer), 2) + if err != nil { + panic(err) // compress/zlib return non-nil error only if level is invalid + } + return zw + }, + } +} + +func zDecompress(src []byte, dst *bytes.Buffer) (int, error) { + br := bytes.NewReader(src) + var zr io.ReadCloser + var err error + + if a := zrPool.Get(); a == nil { + if zr, err = zlib.NewReader(br); err != nil { + return 0, err + } + } else { + zr = a.(io.ReadCloser) + if err := zr.(zlib.Resetter).Reset(br, nil); err != nil { + return 0, err + } + } + + n, _ := dst.ReadFrom(zr) // ignore err because zr.Close() will return it again. + err = zr.Close() // zr.Close() may return chuecksum error. 
+ zrPool.Put(zr) + return int(n), err +} + +func zCompress(src []byte, dst io.Writer) error { + zw := zwPool.Get().(*zlib.Writer) + zw.Reset(dst) + if _, err := zw.Write(src); err != nil { + return err + } + err := zw.Close() + zwPool.Put(zw) + return err +} + +type compIO struct { + mc *mysqlConn + buff bytes.Buffer +} + +func newCompIO(mc *mysqlConn) *compIO { + return &compIO{ + mc: mc, + } +} + +func (c *compIO) reset() { + c.buff.Reset() +} + +func (c *compIO) readNext(need int) ([]byte, error) { + for c.buff.Len() < need { + if err := c.readCompressedPacket(); err != nil { + return nil, err + } + } + data := c.buff.Next(need) + return data[:need:need], nil // prevent caller writes into c.buff +} + +func (c *compIO) readCompressedPacket() error { + header, err := c.mc.readNext(7) + if err != nil { + return err + } + _ = header[6] // bounds check hint to compiler; guaranteed by readNext + + // compressed header structure + comprLength := getUint24(header[0:3]) + compressionSequence := header[3] + uncompressedLength := getUint24(header[4:7]) + if debug { + fmt.Printf("uncompress cmplen=%v uncomplen=%v pkt_cmp_seq=%v expected_cmp_seq=%v\n", + comprLength, uncompressedLength, compressionSequence, c.mc.sequence) + } + // Do not return ErrPktSync here. + // Server may return error packet (e.g. 1153 Got a packet bigger than 'max_allowed_packet' bytes) + // before receiving all packets from client. In this case, seqnr is younger than expected. + // NOTE: Both of mariadbclient and mysqlclient do not check seqnr. Only server checks it. 
+ if debug && compressionSequence != c.mc.compressSequence { + fmt.Printf("WARN: unexpected cmpress seq nr: expected %v, got %v", + c.mc.compressSequence, compressionSequence) + } + c.mc.compressSequence = compressionSequence + 1 + + comprData, err := c.mc.readNext(comprLength) + if err != nil { + return err + } + + // if payload is uncompressed, its length will be specified as zero, and its + // true length is contained in comprLength + if uncompressedLength == 0 { + c.buff.Write(comprData) + return nil + } + + // use existing capacity in bytesBuf if possible + c.buff.Grow(uncompressedLength) + nread, err := zDecompress(comprData, &c.buff) + if err != nil { + return err + } + if nread != uncompressedLength { + return fmt.Errorf("invalid compressed packet: uncompressed length in header is %d, actual %d", + uncompressedLength, nread) + } + return nil +} + +const minCompressLength = 150 +const maxPayloadLen = maxPacketSize - 4 + +// writePackets sends one or some packets with compression. +// Use this instead of mc.netConn.Write() when mc.compress is true. +func (c *compIO) writePackets(packets []byte) (int, error) { + totalBytes := len(packets) + blankHeader := make([]byte, 7) + buf := &c.buff + + for len(packets) > 0 { + payloadLen := min(maxPayloadLen, len(packets)) + payload := packets[:payloadLen] + uncompressedLen := payloadLen + + buf.Reset() + buf.Write(blankHeader) // Buffer.Write() never returns error + + // If payload is less than minCompressLength, don't compress. + if uncompressedLen < minCompressLength { + buf.Write(payload) + uncompressedLen = 0 + } else { + err := zCompress(payload, buf) + if debug && err != nil { + fmt.Printf("zCompress error: %v", err) + } + // do not compress if compressed data is larger than uncompressed data + // I intentionally miss 7 byte header in the buf; zCompress must compress more than 7 bytes. 
+ if err != nil || buf.Len() >= uncompressedLen { + buf.Reset() + buf.Write(blankHeader) + buf.Write(payload) + uncompressedLen = 0 + } + } + + if n, err := c.writeCompressedPacket(buf.Bytes(), uncompressedLen); err != nil { + // To allow returning ErrBadConn when sending really 0 bytes, we sum + // up compressed bytes that is returned by underlying Write(). + return totalBytes - len(packets) + n, err + } + packets = packets[payloadLen:] + } + + return totalBytes, nil +} + +// writeCompressedPacket writes a compressed packet with header. +// data should start with 7 size space for header followed by payload. +func (c *compIO) writeCompressedPacket(data []byte, uncompressedLen int) (int, error) { + mc := c.mc + comprLength := len(data) - 7 + if debug { + fmt.Printf( + "writeCompressedPacket: comprLength=%v, uncompressedLen=%v, seq=%v\n", + comprLength, uncompressedLen, mc.compressSequence) + } + + // compression header + putUint24(data[0:3], comprLength) + data[3] = mc.compressSequence + putUint24(data[4:7], uncompressedLen) + + mc.compressSequence++ + return mc.writeWithTimeout(data) +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go index eff978d9..3e455a3f 100644 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -13,10 +13,13 @@ import ( "database/sql" "database/sql/driver" "encoding/json" + "fmt" "io" "net" + "runtime" "strconv" "strings" + "sync/atomic" "time" ) @@ -25,15 +28,17 @@ type mysqlConn struct { netConn net.Conn rawConn net.Conn // underlying connection when netConn is TLS connection. result mysqlResult // managed by clearResult() and handleOkPacket(). 
+ compIO *compIO cfg *Config connector *connector maxAllowedPacket int maxWriteSize int - writeTimeout time.Duration flags clientFlag status statusFlag sequence uint8 + compressSequence uint8 parseTime bool + compress bool // for context support (Go 1.8+) watching bool @@ -41,71 +46,92 @@ type mysqlConn struct { closech chan struct{} finished chan<- struct{} canceled atomicError // set non-nil if conn is canceled - closed atomicBool // set when conn is closed, before closech is closed + closed atomic.Bool // set when conn is closed, before closech is closed } // Helper function to call per-connection logger. func (mc *mysqlConn) log(v ...any) { + _, filename, lineno, ok := runtime.Caller(1) + if ok { + pos := strings.LastIndexByte(filename, '/') + if pos != -1 { + filename = filename[pos+1:] + } + prefix := fmt.Sprintf("%s:%d ", filename, lineno) + v = append([]any{prefix}, v...) + } + mc.cfg.Logger.Print(v...) } +func (mc *mysqlConn) readWithTimeout(b []byte) (int, error) { + to := mc.cfg.ReadTimeout + if to > 0 { + if err := mc.netConn.SetReadDeadline(time.Now().Add(to)); err != nil { + return 0, err + } + } + return mc.netConn.Read(b) +} + +func (mc *mysqlConn) writeWithTimeout(b []byte) (int, error) { + to := mc.cfg.WriteTimeout + if to > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(to)); err != nil { + return 0, err + } + } + return mc.netConn.Write(b) +} + +func (mc *mysqlConn) resetSequence() { + mc.sequence = 0 + mc.compressSequence = 0 +} + +// syncSequence must be called when finished writing some packet and before start reading. +func (mc *mysqlConn) syncSequence() { + // Syncs compressionSequence to sequence. + // This is not documented but done in `net_flush()` in MySQL and MariaDB. 
+ // https://github.com/mariadb-corporation/mariadb-connector-c/blob/8228164f850b12353da24df1b93a1e53cc5e85e9/libmariadb/ma_net.c#L170-L171 + // https://github.com/mysql/mysql-server/blob/824e2b4064053f7daf17d7f3f84b7a3ed92e5fb4/sql-common/net_serv.cc#L293 + if mc.compress { + mc.sequence = mc.compressSequence + mc.compIO.reset() + } +} + // Handles parameters set in DSN after the connection is established func (mc *mysqlConn) handleParams() (err error) { var cmdSet strings.Builder for param, val := range mc.cfg.Params { - switch param { - // Charset: character_set_connection, character_set_client, character_set_results - case "charset": - charsets := strings.Split(val, ",") - for _, cs := range charsets { - // ignore errors here - a charset may not exist - if mc.cfg.Collation != "" { - err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation) - } else { - err = mc.exec("SET NAMES " + cs) - } - if err == nil { - break - } - } - if err != nil { - return - } - - // Other system vars accumulated in a single SET command - default: - if cmdSet.Len() == 0 { - // Heuristic: 29 chars for each other key=value to reduce reallocations - cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1)) - cmdSet.WriteString("SET ") - } else { - cmdSet.WriteString(", ") - } - cmdSet.WriteString(param) - cmdSet.WriteString(" = ") - cmdSet.WriteString(val) + if cmdSet.Len() == 0 { + // Heuristic: 29 chars for each other key=value to reduce reallocations + cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1)) + cmdSet.WriteString("SET ") + } else { + cmdSet.WriteString(", ") } + cmdSet.WriteString(param) + cmdSet.WriteString(" = ") + cmdSet.WriteString(val) } if cmdSet.Len() > 0 { err = mc.exec(cmdSet.String()) - if err != nil { - return - } } return } +// markBadConn replaces errBadConnNoWrite with driver.ErrBadConn. +// This function is used to return driver.ErrBadConn only when safe to retry. 
func (mc *mysqlConn) markBadConn(err error) error { - if mc == nil { - return err + if err == errBadConnNoWrite { + return driver.ErrBadConn } - if err != errBadConnNoWrite { - return err - } - return driver.ErrBadConn + return err } func (mc *mysqlConn) Begin() (driver.Tx, error) { @@ -114,7 +140,6 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) { func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } var q string @@ -135,10 +160,14 @@ func (mc *mysqlConn) Close() (err error) { if !mc.closed.Load() { err = mc.writeCommandPacket(comQuit) } + mc.close() + return +} +// close closes the network connection and clear results without sending COM_QUIT. +func (mc *mysqlConn) close() { mc.cleanup() mc.clearResult() - return } // Closes the network connection and unsets internal variables. Do not call this @@ -157,7 +186,7 @@ func (mc *mysqlConn) cleanup() { return } if err := conn.Close(); err != nil { - mc.log(err) + mc.log("closing connection:", err) } // This function can be called from multiple goroutines. // So we can not mc.clearResult() here. @@ -176,7 +205,6 @@ func (mc *mysqlConn) error() error { func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command @@ -217,8 +245,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin buf, err := mc.buf.takeCompleteBuffer() if err != nil { // can not take the buffer. Something must be wrong with the connection - mc.log(err) - return "", ErrInvalidConn + mc.cleanup() + // interpolateParams would be called before sending any query. + // So its safe to retry. 
+ return "", driver.ErrBadConn } buf = buf[:0] argPos := 0 @@ -309,7 +339,6 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } if len(args) != 0 { @@ -369,7 +398,6 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) handleOk := mc.clearResult() if mc.closed.Load() { - mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } if len(args) != 0 { @@ -385,31 +413,34 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) } // Send command err := mc.writeCommandPacketStr(comQuery, query) - if err == nil { - // Read Result - var resLen int - resLen, err = handleOk.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc + if err != nil { + return nil, mc.markBadConn(err) + } - if resLen == 0 { - rows.rs.done = true + // Read Result + var resLen int + resLen, err = handleOk.readResultSetHeaderPacket() + if err != nil { + return nil, err + } - switch err := rows.NextResultSet(); err { - case nil, io.EOF: - return rows, nil - default: - return nil, err - } - } + rows := new(textRows) + rows.mc = mc - // Columns - rows.rs.columns, err = mc.readColumns(resLen) - return rows, err + if resLen == 0 { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err } } - return nil, mc.markBadConn(err) + + // Columns + rows.rs.columns, err = mc.readColumns(resLen) + return rows, err } // Gets the value of the given MySQL System Variable @@ -443,7 +474,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { return nil, err } -// finish is called when the query has canceled. +// cancel is called when the query has canceled. 
func (mc *mysqlConn) cancel(err error) { mc.canceled.Set(err) mc.cleanup() @@ -464,7 +495,6 @@ func (mc *mysqlConn) finish() { // Ping implements driver.Pinger interface func (mc *mysqlConn) Ping(ctx context.Context) (err error) { if mc.closed.Load() { - mc.log(ErrInvalidConn) return driver.ErrBadConn } @@ -650,7 +680,7 @@ func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { // ResetSession implements driver.SessionResetter. // (From Go 1.10) func (mc *mysqlConn) ResetSession(ctx context.Context) error { - if mc.closed.Load() { + if mc.closed.Load() || mc.buf.busy() { return driver.ErrBadConn } @@ -684,5 +714,8 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error { // IsValid implements driver.Validator interface // (From Go 1.15) func (mc *mysqlConn) IsValid() bool { - return !mc.closed.Load() + return !mc.closed.Load() && !mc.buf.busy() } + +var _ driver.SessionResetter = &mysqlConn{} +var _ driver.Validator = &mysqlConn{} diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go index b6707759..bc1d46af 100644 --- a/vendor/github.com/go-sql-driver/mysql/connector.go +++ b/vendor/github.com/go-sql-driver/mysql/connector.go @@ -11,6 +11,7 @@ package mysql import ( "context" "database/sql/driver" + "fmt" "net" "os" "strconv" @@ -87,20 +88,25 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { mc.parseTime = mc.cfg.ParseTime // Connect to Server - dialsLock.RLock() - dial, ok := dials[mc.cfg.Net] - dialsLock.RUnlock() - if ok { - dctx := ctx - if mc.cfg.Timeout > 0 { - var cancel context.CancelFunc - dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) - defer cancel() - } - mc.netConn, err = dial(dctx, mc.cfg.Addr) + dctx := ctx + if mc.cfg.Timeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) + defer cancel() + } + + if c.cfg.DialFunc != nil { + mc.netConn, err = c.cfg.DialFunc(dctx, mc.cfg.Net, 
mc.cfg.Addr) } else { - nd := net.Dialer{Timeout: mc.cfg.Timeout} - mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) + dialsLock.RLock() + dial, ok := dials[mc.cfg.Net] + dialsLock.RUnlock() + if ok { + mc.netConn, err = dial(dctx, mc.cfg.Addr) + } else { + nd := net.Dialer{} + mc.netConn, err = nd.DialContext(dctx, mc.cfg.Net, mc.cfg.Addr) + } } if err != nil { return nil, err @@ -122,11 +128,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { } defer mc.finish() - mc.buf = newBuffer(mc.netConn) - - // Set I/O timeouts - mc.buf.timeout = mc.cfg.ReadTimeout - mc.writeTimeout = mc.cfg.WriteTimeout + mc.buf = newBuffer() // Reading Handshake Initialization Packet authData, plugin, err := mc.readHandshakePacket() @@ -165,6 +167,10 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { return nil, err } + if mc.cfg.compress && mc.flags&clientCompress == clientCompress { + mc.compress = true + mc.compIO = newCompIO(mc) + } if mc.cfg.MaxAllowedPacket > 0 { mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket } else { @@ -174,12 +180,36 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { mc.Close() return nil, err } - mc.maxAllowedPacket = stringToInt(maxap) - 1 + n, err := strconv.Atoi(string(maxap)) + if err != nil { + mc.Close() + return nil, fmt.Errorf("invalid max_allowed_packet value (%q): %w", maxap, err) + } + mc.maxAllowedPacket = n - 1 } if mc.maxAllowedPacket < maxPacketSize { mc.maxWriteSize = mc.maxAllowedPacket } + // Charset: character_set_connection, character_set_client, character_set_results + if len(mc.cfg.charsets) > 0 { + for _, cs := range mc.cfg.charsets { + // ignore errors here - a charset may not exist + if mc.cfg.Collation != "" { + err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation) + } else { + err = mc.exec("SET NAMES " + cs) + } + if err == nil { + break + } + } + if err != nil { + mc.Close() + return nil, err + } + } + // Handle DSN Params err = 
mc.handleParams() if err != nil { diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go index 22526e03..4aadcd64 100644 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -11,6 +11,8 @@ package mysql import "runtime" const ( + debug = false // for debugging. Set true only in development. + defaultAuthPlugin = "mysql_native_password" defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355 minProtocolVersion = 10 @@ -125,7 +127,10 @@ const ( fieldTypeBit ) const ( - fieldTypeJSON fieldType = iota + 0xf5 + fieldTypeVector fieldType = iota + 0xf2 + fieldTypeInvalid + fieldTypeBool + fieldTypeJSON fieldTypeNewDecimal fieldTypeEnum fieldTypeSet diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index 65f5a024..ecf62567 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -44,7 +44,7 @@ type Config struct { DBName string // Database name Params map[string]string // Connection parameters ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs - Collation string // Connection collation + Collation string // Connection collation. 
When set, this will be set in SET NAMES COLLATE query Loc *time.Location // Location for time.Time values MaxAllowedPacket int // Max packet size allowed ServerPubKey string // Server public key name @@ -54,6 +54,8 @@ type Config struct { ReadTimeout time.Duration // I/O read timeout WriteTimeout time.Duration // I/O write timeout Logger Logger // Logger + // DialFunc specifies the dial function for creating connections + DialFunc func(ctx context.Context, network, addr string) (net.Conn, error) // boolean fields @@ -70,11 +72,15 @@ type Config struct { ParseTime bool // Parse time values to time.Time RejectReadOnly bool // Reject read-only connections - // unexported fields. new options should be come here + // unexported fields. new options should be come here. + // boolean first. alphabetical order. + + compress bool // Enable zlib compression beforeConnect func(context.Context, *Config) error // Invoked before a connection is established pubKey *rsa.PublicKey // Server public key timeTruncate time.Duration // Truncate time.Time values to the specified duration + charsets []string // Connection charset. When set, this will be set in SET NAMES query } // Functional Options Pattern @@ -90,7 +96,6 @@ func NewConfig() *Config { AllowNativePasswords: true, CheckConnLiveness: true, } - return cfg } @@ -122,6 +127,29 @@ func BeforeConnect(fn func(context.Context, *Config) error) Option { } } +// EnableCompress sets the compression mode. +func EnableCompression(yes bool) Option { + return func(cfg *Config) error { + cfg.compress = yes + return nil + } +} + +// Charset sets the connection charset and collation. +// +// charset is the connection charset. +// collation is the connection collation. It can be null or empty string. +// +// When collation is not specified, `SET NAMES ` command is sent when the connection is established. +// When collation is specified, `SET NAMES COLLATE ` command is sent when the connection is established. 
+func Charset(charset, collation string) Option { + return func(cfg *Config) error { + cfg.charsets = []string{charset} + cfg.Collation = collation + return nil + } +} + func (cfg *Config) Clone() *Config { cp := *cfg if cp.TLS != nil { @@ -282,6 +310,10 @@ func (cfg *Config) FormatDSN() string { writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") } + if charsets := cfg.charsets; len(charsets) > 0 { + writeDSNParam(&buf, &hasParam, "charset", strings.Join(charsets, ",")) + } + if col := cfg.Collation; col != "" { writeDSNParam(&buf, &hasParam, "collation", col) } @@ -290,6 +322,14 @@ func (cfg *Config) FormatDSN() string { writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true") } + if cfg.ConnectionAttributes != "" { + writeDSNParam(&buf, &hasParam, "connectionAttributes", url.QueryEscape(cfg.ConnectionAttributes)) + } + + if cfg.compress { + writeDSNParam(&buf, &hasParam, "compress", "true") + } + if cfg.InterpolateParams { writeDSNParam(&buf, &hasParam, "interpolateParams", "true") } @@ -501,6 +541,10 @@ func parseDSNParams(cfg *Config, params string) (err error) { return errors.New("invalid bool value: " + value) } + // charset + case "charset": + cfg.charsets = strings.Split(value, ",") + // Collation case "collation": cfg.Collation = value @@ -514,7 +558,11 @@ func parseDSNParams(cfg *Config, params string) (err error) { // Compression case "compress": - return errors.New("compression not implemented yet") + var isBool bool + cfg.compress, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } // Enable client side placeholder substitution case "interpolateParams": diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go index a7ef8890..584617b1 100644 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -32,12 +32,12 @@ var ( // errBadConnNoWrite is used for connection errors where nothing was sent 
to the database yet. // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn - // to trigger a resend. + // to trigger a resend. Use mc.markBadConn(err) to do this. // See https://github.com/go-sql-driver/mysql/pull/302 errBadConnNoWrite = errors.New("bad connection") ) -var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) +var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime)) // Logger is used to log critical error messages. type Logger interface { diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go index 28608424..be5cd809 100644 --- a/vendor/github.com/go-sql-driver/mysql/fields.go +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -112,6 +112,8 @@ func (mf *mysqlField) typeDatabaseName() string { return "VARCHAR" case fieldTypeYear: return "YEAR" + case fieldTypeVector: + return "VECTOR" default: return "" } @@ -198,7 +200,7 @@ func (mf *mysqlField) scanType() reflect.Type { return scanTypeNullFloat case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB, - fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeVector: if mf.charSet == binaryCollationID { return scanTypeBytes } diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go index 0c8af9f1..453ae091 100644 --- a/vendor/github.com/go-sql-driver/mysql/infile.go +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -17,7 +17,7 @@ import ( ) var ( - fileRegister map[string]bool + fileRegister map[string]struct{} fileRegisterLock sync.RWMutex readerRegister map[string]func() io.Reader readerRegisterLock sync.RWMutex @@ -37,10 +37,10 @@ func RegisterLocalFile(filePath string) { fileRegisterLock.Lock() // lazy map init if fileRegister == nil { - 
fileRegister = make(map[string]bool) + fileRegister = make(map[string]struct{}) } - fileRegister[strings.Trim(filePath, `"`)] = true + fileRegister[strings.Trim(filePath, `"`)] = struct{}{} fileRegisterLock.Unlock() } @@ -95,7 +95,6 @@ const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead a func (mc *okHandler) handleInFileRequest(name string) (err error) { var rdr io.Reader - var data []byte packetSize := defaultPacketSize if mc.maxWriteSize < packetSize { packetSize = mc.maxWriteSize @@ -124,9 +123,9 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) { } else { // File name = strings.Trim(name, `"`) fileRegisterLock.RLock() - fr := fileRegister[name] + _, exists := fileRegister[name] fileRegisterLock.RUnlock() - if mc.cfg.AllowAllFiles || fr { + if mc.cfg.AllowAllFiles || exists { var file *os.File var fi os.FileInfo @@ -147,9 +146,11 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) { } // send content packets + var data []byte + // if packetSize == 0, the Reader contains no data if err == nil && packetSize > 0 { - data := make([]byte, 4+packetSize) + data = make([]byte, 4+packetSize) var n int for err == nil { n, err = rdr.Read(data[4:]) @@ -171,6 +172,7 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) { if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil { return ioErr } + mc.conn().syncSequence() // read OK packet if err == nil { diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index 90a34728..831fca6c 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -17,40 +17,66 @@ import ( "fmt" "io" "math" + "os" "strconv" "time" ) -// Packets documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html +// MySQL client/server protocol documentations. 
+// https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html +// https://mariadb.com/kb/en/clientserver-protocol/ + +// read n bytes from mc.buf +func (mc *mysqlConn) readNext(n int) ([]byte, error) { + if mc.buf.len() < n { + err := mc.buf.fill(n, mc.readWithTimeout) + if err != nil { + return nil, err + } + } + return mc.buf.readNext(n), nil +} // Read packet to buffer 'data' func (mc *mysqlConn) readPacket() ([]byte, error) { var prevData []byte + invalidSequence := false + + readNext := mc.readNext + if mc.compress { + readNext = mc.compIO.readNext + } + for { // read packet header - data, err := mc.buf.readNext(4) + data, err := readNext(4) if err != nil { + mc.close() if cerr := mc.canceled.Value(); cerr != nil { return nil, cerr } mc.log(err) - mc.Close() return nil, ErrInvalidConn } // packet length [24 bit] - pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + pktLen := getUint24(data[:3]) + seq := data[3] // check packet sync [8 bit] - if data[3] != mc.sequence { - mc.Close() - if data[3] > mc.sequence { - return nil, ErrPktSyncMul + if seq != mc.sequence { + mc.log(fmt.Sprintf("[warn] unexpected sequence nr: expected %v, got %v", mc.sequence, seq)) + // MySQL and MariaDB doesn't check packet nr in compressed packet. + if !mc.compress { + // For large packets, we stop reading as soon as sync error. 
+ if len(prevData) > 0 { + mc.close() + return nil, ErrPktSyncMul + } + invalidSequence = true } - return nil, ErrPktSync } - mc.sequence++ + mc.sequence = seq + 1 // packets with length 0 terminate a previous packet which is a // multiple of (2^24)-1 bytes long @@ -58,32 +84,38 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { // there was no previous packet if prevData == nil { mc.log(ErrMalformPkt) - mc.Close() + mc.close() return nil, ErrInvalidConn } - return prevData, nil } // read packet body [pktLen bytes] - data, err = mc.buf.readNext(pktLen) + data, err = readNext(pktLen) if err != nil { + mc.close() if cerr := mc.canceled.Value(); cerr != nil { return nil, cerr } mc.log(err) - mc.Close() return nil, ErrInvalidConn } // return data if this was the last packet if pktLen < maxPacketSize { // zero allocations for non-split packets - if prevData == nil { - return data, nil + if prevData != nil { + data = append(prevData, data...) } - - return append(prevData, data...), nil + if invalidSequence { + mc.close() + // return sync error only for regular packet. + // error packets may have wrong sequence number. + if data[0] != iERR { + return nil, ErrPktSync + } + } + return data, nil } prevData = append(prevData, data...) 
@@ -93,60 +125,52 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { // Write packet buffer 'data' func (mc *mysqlConn) writePacket(data []byte) error { pktLen := len(data) - 4 - if pktLen > mc.maxAllowedPacket { return ErrPktTooLarge } + writeFunc := mc.writeWithTimeout + if mc.compress { + writeFunc = mc.compIO.writePackets + } + for { - var size int - if pktLen >= maxPacketSize { - data[0] = 0xff - data[1] = 0xff - data[2] = 0xff - size = maxPacketSize - } else { - data[0] = byte(pktLen) - data[1] = byte(pktLen >> 8) - data[2] = byte(pktLen >> 16) - size = pktLen - } + size := min(maxPacketSize, pktLen) + putUint24(data[:3], size) data[3] = mc.sequence // Write packet - if mc.writeTimeout > 0 { - if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { - return err - } + if debug { + fmt.Fprintf(os.Stderr, "writePacket: size=%v seq=%v\n", size, mc.sequence) } - n, err := mc.netConn.Write(data[:4+size]) - if err == nil && n == 4+size { - mc.sequence++ - if size != maxPacketSize { - return nil - } - pktLen -= size - data = data[size:] - continue - } - - // Handle error - if err == nil { // n != len(data) + n, err := writeFunc(data[:4+size]) + if err != nil { mc.cleanup() - mc.log(ErrMalformPkt) - } else { if cerr := mc.canceled.Value(); cerr != nil { return cerr } if n == 0 && pktLen == len(data)-4 { // only for the first loop iteration when nothing was written yet + mc.log(err) return errBadConnNoWrite + } else { + return err } - mc.cleanup() - mc.log(err) } - return ErrInvalidConn + if n != 4+size { + // io.Writer(b) must return a non-nil error if it cannot write len(b) bytes. + // The io.ErrShortWrite error is used to indicate that this rule has not been followed. 
+ mc.cleanup() + return io.ErrShortWrite + } + + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] } } @@ -159,11 +183,6 @@ func (mc *mysqlConn) writePacket(data []byte) error { func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) { data, err = mc.readPacket() if err != nil { - // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since - // in connection initialization we don't risk retrying non-idempotent actions. - if err == ErrInvalidConn { - return nil, "", driver.ErrBadConn - } return } @@ -207,10 +226,13 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro if len(data) > pos { // character set [1 byte] // status flags [2 bytes] + pos += 3 // capability flags (upper 2 bytes) [2 bytes] + mc.flags |= clientFlag(binary.LittleEndian.Uint16(data[pos:pos+2])) << 16 + pos += 2 // length of auth-plugin-data [1 byte] // reserved (all [00]) [10 bytes] - pos += 1 + 2 + 2 + 1 + 10 + pos += 11 // second part of the password cipher [minimum 13 bytes], // where len=MAX(13, length of auth-plugin-data - 8) @@ -258,13 +280,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string clientLocalFiles | clientPluginAuth | clientMultiResults | - clientConnectAttrs | + mc.flags&clientConnectAttrs | mc.flags&clientLongFlag + sendConnectAttrs := mc.flags&clientConnectAttrs != 0 + if mc.cfg.ClientFoundRows { clientFlags |= clientFoundRows } - + if mc.cfg.compress && mc.flags&clientCompress == clientCompress { + clientFlags |= clientCompress + } // To enable TLS / SSL if mc.cfg.TLS != nil { clientFlags |= clientSSL @@ -293,43 +319,37 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string } // encode length of the connection attributes - var connAttrsLEIBuf [9]byte - connAttrsLen := len(mc.connector.encodedAttributes) - connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen)) - 
pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes) + var connAttrsLEI []byte + if sendConnectAttrs { + var connAttrsLEIBuf [9]byte + connAttrsLen := len(mc.connector.encodedAttributes) + connAttrsLEI = appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen)) + pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes) + } // Calculate packet length and get buffer with that size data, err := mc.buf.takeBuffer(pktLen + 4) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + mc.cleanup() + return err } // ClientFlags [32 bit] - data[4] = byte(clientFlags) - data[5] = byte(clientFlags >> 8) - data[6] = byte(clientFlags >> 16) - data[7] = byte(clientFlags >> 24) + binary.LittleEndian.PutUint32(data[4:], uint32(clientFlags)) // MaxPacketSize [32 bit] (none) - data[8] = 0x00 - data[9] = 0x00 - data[10] = 0x00 - data[11] = 0x00 + binary.LittleEndian.PutUint32(data[8:], 0) // Collation ID [1 byte] - cname := mc.cfg.Collation - if cname == "" { - cname = defaultCollation - } - var found bool - data[12], found = collations[cname] - if !found { - // Note possibility for false negatives: - // could be triggered although the collation is valid if the - // collations map does not contain entries the server supports. - return fmt.Errorf("unknown collation: %q", cname) + data[12] = defaultCollationID + if cname := mc.cfg.Collation; cname != "" { + colID, ok := collations[cname] + if ok { + data[12] = colID + } else if len(mc.cfg.charsets) > 0 { + // When cfg.charset is set, the collation is set by `SET NAMES COLLATE `. 
+ return fmt.Errorf("unknown collation: %q", cname) + } } // Filler [23 bytes] (all 0x00) @@ -349,10 +369,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string // Switch to TLS tlsConn := tls.Client(mc.netConn, mc.cfg.TLS) if err := tlsConn.Handshake(); err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return cerr + } return err } mc.netConn = tlsConn - mc.buf.nc = tlsConn } // User [null terminated string] @@ -378,8 +400,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string pos++ // Connection Attributes - pos += copy(data[pos:], connAttrsLEI) - pos += copy(data[pos:], []byte(mc.connector.encodedAttributes)) + if sendConnectAttrs { + pos += copy(data[pos:], connAttrsLEI) + pos += copy(data[pos:], []byte(mc.connector.encodedAttributes)) + } // Send Auth packet return mc.writePacket(data[:pos]) @@ -388,11 +412,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { pktLen := 4 + len(authData) - data, err := mc.buf.takeSmallBuffer(pktLen) + data, err := mc.buf.takeBuffer(pktLen) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + mc.cleanup() + return err } // Add the auth data [EOF] @@ -406,32 +429,30 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { func (mc *mysqlConn) writeCommandPacket(command byte) error { // Reset Packet Sequence - mc.sequence = 0 + mc.resetSequence() data, err := mc.buf.takeSmallBuffer(4 + 1) if err != nil { - // cannot take the buffer. 
Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // Add command byte data[4] = command // Send CMD packet - return mc.writePacket(data) + err = mc.writePacket(data) + mc.syncSequence() + return err } func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { // Reset Packet Sequence - mc.sequence = 0 + mc.resetSequence() pktLen := 1 + len(arg) data, err := mc.buf.takeBuffer(pktLen + 4) if err != nil { - // cannot take the buffer. Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // Add command byte @@ -441,31 +462,30 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { copy(data[5:], arg) // Send CMD packet - return mc.writePacket(data) + err = mc.writePacket(data) + mc.syncSequence() + return err } func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { // Reset Packet Sequence - mc.sequence = 0 + mc.resetSequence() data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) if err != nil { - // cannot take the buffer. 
Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // Add command byte data[4] = command // Add arg [32 bit] - data[5] = byte(arg) - data[6] = byte(arg >> 8) - data[7] = byte(arg >> 16) - data[8] = byte(arg >> 24) + binary.LittleEndian.PutUint32(data[5:], arg) // Send CMD packet - return mc.writePacket(data) + err = mc.writePacket(data) + mc.syncSequence() + return err } /****************************************************************************** @@ -500,6 +520,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { } plugin := string(data[1:pluginEndIndex]) authData := data[pluginEndIndex+1:] + if len(authData) > 0 && authData[len(authData)-1] == 0 { + authData = authData[:len(authData)-1] + } return authData, plugin, nil default: // Error otherwise @@ -521,32 +544,33 @@ func (mc *okHandler) readResultOK() error { } // Result Set Header Packet -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response.html func (mc *okHandler) readResultSetHeaderPacket() (int, error) { // handleOkPacket replaces both values; other cases leave the values unchanged. mc.result.affectedRows = append(mc.result.affectedRows, 0) mc.result.insertIds = append(mc.result.insertIds, 0) data, err := mc.conn().readPacket() - if err == nil { - switch data[0] { - - case iOK: - return 0, mc.handleOkPacket(data) - - case iERR: - return 0, mc.conn().handleErrorPacket(data) - - case iLocalInFile: - return 0, mc.handleInFileRequest(string(data[1:])) - } - - // column count - num, _, _ := readLengthEncodedInteger(data) - // ignore remaining data in the packet. see #1478. 
- return int(num), nil + if err != nil { + return 0, err } - return 0, err + + switch data[0] { + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.conn().handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response_text_resultset.html + num, _, _ := readLengthEncodedInteger(data) + // ignore remaining data in the packet. see #1478. + return int(num), nil } // Error Packet @@ -563,7 +587,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error { // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) - if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { + // 1836: ER_READ_ONLY_MODE + if (errno == 1792 || errno == 1290 || errno == 1836) && mc.cfg.RejectReadOnly { // Oops; we are connected to a read-only connection, and won't be able // to issue any write statements. 
Since RejectReadOnly is configured, // we throw away this connection hoping this one would have write @@ -930,32 +955,26 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { pktLen = dataOffset + argLen } - stmt.mc.sequence = 0 // Add command byte [1 byte] data[4] = comStmtSendLongData // Add stmtID [32 bit] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) + binary.LittleEndian.PutUint32(data[5:], stmt.id) // Add paramID [16 bit] - data[9] = byte(paramID) - data[10] = byte(paramID >> 8) + binary.LittleEndian.PutUint16(data[9:], uint16(paramID)) // Send CMD packet err := stmt.mc.writePacket(data[:4+pktLen]) + // Every COM_LONG_DATA packet reset Packet Sequence + stmt.mc.resetSequence() if err == nil { data = data[pktLen-dataOffset:] continue } return err - } - // Reset Packet Sequence - stmt.mc.sequence = 0 return nil } @@ -980,7 +999,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { } // Reset packet-sequence - mc.sequence = 0 + mc.resetSequence() var data []byte var err error @@ -992,28 +1011,20 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // In this case the len(data) == cap(data) which is used to optimise the flow below. } if err != nil { - // cannot take the buffer. 
Something must be wrong with the connection - mc.log(err) - return errBadConnNoWrite + return err } // command [1 byte] data[4] = comStmtExecute // statement_id [4 bytes] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) + binary.LittleEndian.PutUint32(data[5:], stmt.id) // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] data[9] = 0x00 // iteration_count (uint32(1)) [4 bytes] - data[10] = 0x01 - data[11] = 0x00 - data[12] = 0x00 - data[13] = 0x00 + binary.LittleEndian.PutUint32(data[10:], 1) if len(args) > 0 { pos := minPktLen @@ -1067,50 +1078,17 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { case int64: paramTypes[i+i] = byte(fieldTypeLongLong) paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } + paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v)) case uint64: paramTypes[i+i] = byte(fieldTypeLongLong) paramTypes[i+i+1] = 0x80 // type is unsigned - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } + paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v)) case float64: paramTypes[i+i] = byte(fieldTypeDouble) paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - math.Float64bits(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(math.Float64bits(v))..., - ) - } + paramValues = binary.LittleEndian.AppendUint64(paramValues, 
math.Float64bits(v)) case bool: paramTypes[i+i] = byte(fieldTypeTiny) @@ -1191,17 +1169,16 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // In that case we must build the data packet with the new values buffer if valuesCap != cap(paramValues) { data = append(data[:pos], paramValues...) - if err = mc.buf.store(data); err != nil { - mc.log(err) - return errBadConnNoWrite - } + mc.buf.store(data) // allow this buffer to be reused } pos += len(paramValues) data = data[:pos] } - return mc.writePacket(data) + err = mc.writePacket(data) + mc.syncSequence() + return err } // For each remaining resultset in the stream, discards its rows and updates @@ -1325,7 +1302,8 @@ func (rows *binaryRows) readRow(dest []driver.Value) error { case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, + fieldTypeVector: var isNull bool var n int dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go index 81fa6062..df98417b 100644 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -111,13 +111,6 @@ func (rows *mysqlRows) Close() (err error) { return err } - // flip the buffer for this connection if we need to drain it. - // note that for a successful query (i.e. 
one where rows.next() - // has been called until it returns false), `rows.mc` will be nil - // by the time the user calls `(*Rows).Close`, so we won't reach this - // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47 - mc.buf.flip() - // Remove unread packets from stream if !rows.rs.done { err = mc.readUntilEOF() diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go index 0436f224..35df8545 100644 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -24,11 +24,12 @@ type mysqlStmt struct { func (stmt *mysqlStmt) Close() error { if stmt.mc == nil || stmt.mc.closed.Load() { - // driver.Stmt.Close can be called more than once, thus this function - // has to be idempotent. - // See also Issue #450 and golang/go#16019. - //errLog.Print(ErrInvalidConn) - return driver.ErrBadConn + // driver.Stmt.Close could be called more than once, thus this function + // had to be idempotent. See also Issue #450 and golang/go#16019. + // This bug has been fixed in Go 1.8. + // https://github.com/golang/go/commit/90b8a0ca2d0b565c7c7199ffcf77b15ea6b6db3a + // But we keep this function idempotent because it is safer. 
+ return nil } err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) @@ -51,7 +52,6 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) { func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { if stmt.mc.closed.Load() { - stmt.mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command @@ -95,7 +95,6 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { if stmt.mc.closed.Load() { - stmt.mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go index 4a4b6100..8c502f49 100644 --- a/vendor/github.com/go-sql-driver/mysql/transaction.go +++ b/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -13,18 +13,32 @@ type mysqlTx struct { } func (tx *mysqlTx) Commit() (err error) { - if tx.mc == nil || tx.mc.closed.Load() { + if tx.mc == nil { return ErrInvalidConn } + if tx.mc.closed.Load() { + err = tx.mc.error() + if err == nil { + err = ErrInvalidConn + } + return + } err = tx.mc.exec("COMMIT") tx.mc = nil return } func (tx *mysqlTx) Rollback() (err error) { - if tx.mc == nil || tx.mc.closed.Load() { + if tx.mc == nil { return ErrInvalidConn } + if tx.mc.closed.Load() { + err = tx.mc.error() + if err == nil { + err = ErrInvalidConn + } + return + } err = tx.mc.exec("ROLLBACK") tx.mc = nil return diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go index cda24fe7..8716c26c 100644 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -490,17 +490,16 @@ func formatBinaryTime(src []byte, length uint8) (driver.Value, error) { * Convert from and to bytes * ******************************************************************************/ -func uint64ToBytes(n uint64) []byte { - 
return []byte{ - byte(n), - byte(n >> 8), - byte(n >> 16), - byte(n >> 24), - byte(n >> 32), - byte(n >> 40), - byte(n >> 48), - byte(n >> 56), - } +// 24bit integer: used for packet headers. + +func putUint24(data []byte, n int) { + data[2] = byte(n >> 16) + data[1] = byte(n >> 8) + data[0] = byte(n) +} + +func getUint24(data []byte) int { + return int(data[2])<<16 | int(data[1])<<8 | int(data[0]) } func uint64ToString(n uint64) []byte { @@ -525,16 +524,6 @@ func uint64ToString(n uint64) []byte { return a[i:] } -// treats string value as unsigned integer representation -func stringToInt(b []byte) int { - val := 0 - for i := range b { - val *= 10 - val += int(b[i] - 0x30) - } - return val -} - // returns the string read as a bytes slice, whether the value is NULL, // the number of bytes read and an error, in case the string is longer than // the input slice @@ -586,18 +575,15 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) { // 252: value of following 2 case 0xfc: - return uint64(b[1]) | uint64(b[2])<<8, false, 3 + return uint64(binary.LittleEndian.Uint16(b[1:])), false, 3 // 253: value of following 3 case 0xfd: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + return uint64(getUint24(b[1:])), false, 4 // 254: value of following 8 case 0xfe: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | - uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | - uint64(b[7])<<48 | uint64(b[8])<<56, - false, 9 + return uint64(binary.LittleEndian.Uint64(b[1:])), false, 9 } // 0-250: value of first byte @@ -611,13 +597,14 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte { return append(b, byte(n)) case n <= 0xffff: - return append(b, 0xfc, byte(n), byte(n>>8)) + b = append(b, 0xfc) + return binary.LittleEndian.AppendUint16(b, uint16(n)) case n <= 0xffffff: return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) } - return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), - byte(n>>32), byte(n>>40), 
byte(n>>48), byte(n>>56)) + b = append(b, 0xfe) + return binary.LittleEndian.AppendUint64(b, n) } func appendLengthEncodedString(b []byte, s string) []byte { diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 00000000..1f664d13 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 00000000..2e0f9f5f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . 
--impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 00000000..470e7ca2 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 00000000..763143aa --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,23 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/go-viper/mapstructure) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/go-viper/maptstructure + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - goimports + - staticcheck + # - stylecheck diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md new file mode 100644 index 00000000..afd44e5f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -0,0 +1,104 @@ +> [!WARNING] +> As of v2 of this library, change log can be found in GitHub releases. + +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. 
[GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. 
+* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md new file mode 100644 index 00000000..dd5ec69d --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -0,0 +1,80 @@ +# mapstructure + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. 
+ +## Installation + +```shell +go get github.com/go-viper/mapstructure/v2 +``` + +## Migrating from `github.com/mitchellh/mapstructure` + +[@mitchehllh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This is a repository achieved the "blessed fork" status. + +You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`. +The API is the same, so you don't need to change anything else. + +Here is a script that can help you with the migration: + +```shell +sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go') +``` + +If you need more time to migrate your code, that is absolutely fine. + +Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate: + +```shell +replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0 +``` + +## Usage & Example + +For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. 
We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 00000000..2523c6ad --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,609 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. 
+ potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. 
+// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, c := range cached { + data, err = c(newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, c := range cached { + out, err = c(a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. 
+func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. 
+func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}, +) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) 
(interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddr(data.(string)) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddrPort(data.(string)) + } +} + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. 
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), err + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), err + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. 
+func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), err + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), err + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. +func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), err + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), err + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. 
+func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseInt(data.(string), 0, 64) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseUint(data.(string), 0, 64) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), err + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), err + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. 
+func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), err + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseFloat(data.(string), 64) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. +func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + return strconv.ParseBool(data.(string)) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. 
+func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), err + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseComplex(data.(string), 128) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 00000000..4bea8154 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + 
"owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1717285511, + "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + 
"repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + 
"type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + 
"repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + 
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 00000000..4ed0f533 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,39 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... 
}: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 00000000..d1c15e47 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 00000000..d74e3a0b --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 00000000..700b4022 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. 
+// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. + // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go new file mode 100644 index 00000000..1cd6204b --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -0,0 +1,1593 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// # Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. 
For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// # Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// # Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. 
+// +// # Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// # Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// # Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. 
+// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// # Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/go-viper/mapstructure/v2/internal/errors" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. 
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. 
+ ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // The option of the value in the tag that indicates a field should + // be squashed. This defaults to "squash". + SquashTagOption string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. 
+ MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig + cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error) +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. 
+func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
+func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.SquashTagOption == "" { + config.SquashTagOption = "squash" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + if config.DecodeHook != nil { + result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook) + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + + // Retain some of the original behavior when multiple errors ocurr + var joinedErr interface{ Unwrap() []error } + if errors.As(err, &joinedErr) { + return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err) + } + + return err +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. 
+ if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.cachedDecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = d.cachedDecodeHook(inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %w", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported 
type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name 
string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else 
{ + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + 
val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + dataVal := reflect.ValueOf(data) + + // 
Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + } + + // Check input type and based on the input type jump to the proper func + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + var errs []error + + // If the input data is empty, then we just match what the input data is. 
+ if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errs = append(errs, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errs = append(errs, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. 
+ squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type()) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. 
+ addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. 
Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. 
+ return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). 
The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + + var errs []error + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. 
+ var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == d.config.SquashTagOption { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + switch fieldVal.Kind() { + case reflect.Struct: + structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + 
// doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errs = append(errs, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errs = append(errs, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errs = append(errs, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errs = append(errs, err) + } + + if err := errors.Join(errs...); err != nil { + return err + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." 
+ key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 00000000..d0913fff 
--- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + +func isComparable(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Invalid: + return false + + case reflect.Array: + switch v.Type().Elem().Kind() { + case reflect.Interface, reflect.Array, reflect.Struct: + for i := 0; i < v.Type().Len(); i++ { + // if !v.Index(i).Comparable() { + if !isComparable(v.Index(i)) { + return false + } + } + return true + } + return v.Type().Comparable() + + case reflect.Interface: + // return v.Elem().Comparable() + return isComparable(v.Elem()) + + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + return false + + // if !v.Field(i).Comparable() { + if !isComparable(v.Field(i)) { + return false + } + } + return true + + default: + return v.Type().Comparable() + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go new file mode 100644 index 00000000..f8255a1b --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package mapstructure + +import "reflect" + +// TODO: remove once we drop support for Go <1.20 +func isComparable(v reflect.Value) bool { + return v.Comparable() +} diff --git a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/.gitignore deleted file mode 100644 index 09573e01..00000000 --- a/vendor/github.com/golang-jwt/jwt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin -.idea/ - diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/LICENSE deleted file mode 100644 index 35dbc252..00000000 --- a/vendor/github.com/golang-jwt/jwt/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2012 Dave Grijalva -Copyright (c) 2021 golang-jwt maintainers - -Permission is hereby granted, free of charge, to any person obtaining 
a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md deleted file mode 100644 index c4efbd2a..00000000 --- a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md +++ /dev/null @@ -1,22 +0,0 @@ -## Migration Guide (v3.2.1) - -Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1]), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. - -### go.mod replacement - -In a first step, the easiest way is to use `go mod edit` to issue a replacement. 
- -``` -go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible -go mod tidy -``` - -This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency to `github.com/golang-jwt/jwt`. Try to compile your project; it should still work. - -### Cleanup - -If your code still consistently builds, you can replace all occurences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed. - -## Older releases (before v3.2.0) - -The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. \ No newline at end of file diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/README.md deleted file mode 100644 index 9b653e46..00000000 --- a/vendor/github.com/golang-jwt/jwt/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# jwt-go - -[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) -[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). - -**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. 
See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. - -Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. - -### Supported Go versions - -Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). -So we will support a major version of Go until there are two newer major releases. -We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities -which will not be fixed. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. 
It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: - -* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) -* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) -* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. 
- -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning. - -**BREAKING CHANGES:*** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. 
The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### Signing Methods and Key Types - -Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: - -* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. 
- -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -### Troubleshooting - -This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. - -## More - -Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md deleted file mode 100644 index 637f2ba6..00000000 --- a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md +++ /dev/null @@ -1,131 +0,0 @@ -## `jwt-go` Version History - -#### 3.2.2 - -* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. 
By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). -* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). -* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). -* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). - -#### 3.2.1 - -* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code - * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` -* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. 
- * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. 
The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. 
- -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. 
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go deleted file mode 100644 index f1dba3cb..00000000 --- a/vendor/github.com/golang-jwt/jwt/claims.go +++ /dev/null @@ -1,146 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. 
- if !c.VerifyExpiresAt(now, false) { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if !c.VerifyNotBefore(now, false) { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud([]string{c.Audience}, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud []string, cmp string, required bool) bool { - if len(aud) == 0 { - return !required - } - // use a var here to keep constant time compare when looping over a number of claims - result := false - - var stringClaims string - for _, a := range aud { - if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { - result = true - } - stringClaims = stringClaims + a - } - - // case where "" is sent in one or many aud claims - if len(stringClaims) == 0 { - return !required - } - - return result -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/doc.go deleted file mode 100644 index a86dc1a3..00000000 --- a/vendor/github.com/golang-jwt/jwt/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. 
-package jwt diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go deleted file mode 100644 index 15e23435..00000000 --- a/vendor/github.com/golang-jwt/jwt/ecdsa.go +++ /dev/null @@ -1,142 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k 
:= key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { - return nil - } - - return ErrECDSAVerification -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outputs (r and s) into big-endian byte arrays - // padded with zeros on the left to make sure the sizes work out. - // Output must be 2*keyBytes long. - out := make([]byte, 2*keyBytes) - r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. - s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
- - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go deleted file mode 100644 index db9f4be7..00000000 --- a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go 
b/vendor/github.com/golang-jwt/jwt/ed25519.go deleted file mode 100644 index a2f8ddbe..00000000 --- a/vendor/github.com/golang-jwt/jwt/ed25519.go +++ /dev/null @@ -1,81 +0,0 @@ -package jwt - -import ( - "errors" - - "crypto/ed25519" -) - -var ( - ErrEd25519Verification = errors.New("ed25519: verification error") -) - -// Implements the EdDSA family -// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification -type SigningMethodEd25519 struct{} - -// Specific instance for EdDSA -var ( - SigningMethodEdDSA *SigningMethodEd25519 -) - -func init() { - SigningMethodEdDSA = &SigningMethodEd25519{} - RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { - return SigningMethodEdDSA - }) -} - -func (m *SigningMethodEd25519) Alg() string { - return "EdDSA" -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ed25519.PublicKey -func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { - var err error - var ed25519Key ed25519.PublicKey - var ok bool - - if ed25519Key, ok = key.(ed25519.PublicKey); !ok { - return ErrInvalidKeyType - } - - if len(ed25519Key) != ed25519.PublicKeySize { - return ErrInvalidKey - } - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Verify the signature - if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { - return ErrEd25519Verification - } - - return nil -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ed25519.PrivateKey -func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { - var ed25519Key ed25519.PrivateKey - var ok bool - - if ed25519Key, ok = key.(ed25519.PrivateKey); !ok { - return "", ErrInvalidKeyType - } - - // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize - // this allows to avoid recover usage - if len(ed25519Key) != 
ed25519.PrivateKeySize { - return "", ErrInvalidKey - } - - // Sign the string and return the encoded result - sig := ed25519.Sign(ed25519Key, []byte(signingString)) - return EncodeSegment(sig), nil -} diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go deleted file mode 100644 index c6357275..00000000 --- a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go +++ /dev/null @@ -1,64 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ed25519" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key") - ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key") -) - -// Parse PEM-encoded Edwards curve private key -func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey ed25519.PrivateKey - var ok bool - if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { - return nil, ErrNotEdPrivateKey - } - - return pkey, nil -} - -// Parse PEM-encoded Edwards curve public key -func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - return nil, err - } - - var pkey ed25519.PublicKey - var ok bool - if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { - return nil, ErrNotEdPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go deleted file mode 100644 
index 1c93024a..00000000 --- a/vendor/github.com/golang-jwt/jwt/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... 
constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go deleted file mode 100644 index addbe5d4..00000000 --- a/vendor/github.com/golang-jwt/jwt/hmac.go +++ /dev/null @@ -1,95 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -// Expects key type of []byte for both signing and validation -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. 
-func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. -// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKeyType -} diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go deleted file mode 100644 index 72c79f92..00000000 --- a/vendor/github.com/golang-jwt/jwt/map_claims.go +++ /dev/null @@ -1,120 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// VerifyAudience Compares the aud claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - var aud []string - switch v := m["aud"].(type) { - case string: - aud = append(aud, v) - case []string: - aud = v - case []interface{}: - for _, a := range v { - vs, ok := a.(string) - if !ok { - return false - } - aud = append(aud, vs) - } - } - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - exp, ok := m["exp"] - if !ok { - return !req - } - switch expType := exp.(type) { - case float64: - return verifyExp(int64(expType), cmp, req) - case json.Number: - v, _ := expType.Int64() - return verifyExp(v, cmp, req) - } - return false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - iat, ok := m["iat"] - if !ok { - return !req - } - switch iatType := iat.(type) { - case float64: - return verifyIat(int64(iatType), cmp, req) - case json.Number: - v, _ := iatType.Int64() - return verifyIat(v, cmp, req) - } - return false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - nbf, ok := m["nbf"] - if !ok { - return !req - } - switch nbfType := nbf.(type) { - case float64: - return verifyNbf(int64(nbfType), cmp, req) - case json.Number: - v, _ := nbfType.Int64() - return verifyNbf(v, cmp, req) - } - return false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if !m.VerifyExpiresAt(now, false) { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if !m.VerifyIssuedAt(now, false) { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if !m.VerifyNotBefore(now, false) { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/none.go deleted file mode 100644 index f04d189d..00000000 --- a/vendor/github.com/golang-jwt/jwt/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. 
-var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. 
- return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go deleted file mode 100644 index d6901d9a..00000000 --- a/vendor/github.com/golang-jwt/jwt/parser.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { - return token, err - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. 
short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - if ve, ok := err.(*ValidationError); ok { - return token, ve - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} - -// WARNING: Don't use this method unless you know what you're doing -// -// This method parses the token but doesn't validate the signature. It's only -// ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from -// it. 
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - token = &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. 
Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - return token, parts, nil -} diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go deleted file mode 100644 index e4caf1ca..00000000 --- a/vendor/github.com/golang-jwt/jwt/rsa.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m 
*SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be an *rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be an *rsa.PrivateKey structure. -func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go deleted file mode 100644 index c0147086..00000000 --- a/vendor/github.com/golang-jwt/jwt/rsa_pss.go +++ /dev/null @@ -1,142 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions - // VerifyOptions is 
optional. If set overrides Options for rsa.VerifyPPS. - // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow - // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. - // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. - VerifyOptions *rsa.PSSOptions -} - -// Specific instances for RS/PS and company. -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode 
the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - opts := m.Options - if m.VerifyOptions != nil { - opts = m.VerifyOptions - } - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go deleted file mode 100644 index 14c78c29..00000000 --- a/vendor/github.com/golang-jwt/jwt/rsa_utils.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse 
PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 private key protected with password -func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - - var blockDecrypted []byte - if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { - return nil, err - } - - if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} 
diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/signing_method.go deleted file mode 100644 index ed1f212b..00000000 --- a/vendor/github.com/golang-jwt/jwt/signing_method.go +++ /dev/null @@ -1,35 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go deleted file mode 100644 index 6b30ced1..00000000 --- a/vendor/github.com/golang-jwt/jwt/token.go +++ /dev/null @@ -1,104 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. 
-var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. 
-func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return base64.RawURLEncoding.EncodeToString(seg) -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - return base64.RawURLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index c0a6f692..0fc510a0 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -7,6 +7,8 @@ import ( "strings" ) +const tokenDelimiter = "." + type Parser struct { // If populated, only these methods will be considered valid. // @@ -36,19 +38,21 @@ func NewParser(options ...ParserOption) *Parser { return p } -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the key for validating. +// Parse parses, validates, verifies the signature and returns the parsed token. 
keyFunc will +// receive the parsed token and should return the key for validating. func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } -// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims -// interface. This provides default values which can be overridden and allows a caller to use their own type, rather -// than the default MapClaims implementation of Claims. +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object +// implementing the Claims interface. This provides default values which can be overridden and +// allows a caller to use their own type, rather than the default MapClaims implementation of +// Claims. // -// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), -// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the -// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such +// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or +// b) if you are using a pointer, allocate the proper memory for it before passing in the overall +// claims, otherwise you might run into a panic. 
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { @@ -85,12 +89,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } + // Perform validation + token.Signature = parts[2] + if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid} + } + vErr := &ValidationError{} // Validate Claims if !p.SkipClaimsValidation { if err := token.Claims.Valid(); err != nil { - // If the Claims Valid returned an error, check if it is a validation error, // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set if e, ok := err.(*ValidationError); !ok { @@ -98,22 +107,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } else { vErr = e } + return token, vErr } } - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } + // No errors so far, token is valid. + token.Valid = true - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr + return token, nil } // ParseUnverified parses the token but doesn't validate the signature. @@ -123,9 +124,10 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf // It's only ever useful in cases where you know the signature is valid (because it has // been checked previously in the stack) and you want to extract values from it. 
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + var ok bool + parts, ok = splitToken(tokenString) + if !ok { + return nil, nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) } token = &Token{Raw: tokenString} @@ -175,3 +177,30 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke return token, parts, nil } + +// splitToken splits a token string into three parts: header, claims, and signature. It will only +// return true if the token contains exactly two delimiters and three parts. In all other cases, it +// will return nil parts and false. +func splitToken(token string) ([]string, bool) { + parts := make([]string, 3) + header, remain, ok := strings.Cut(token, tokenDelimiter) + if !ok { + return nil, false + } + parts[0] = header + claims, remain, ok := strings.Cut(remain, tokenDelimiter) + if !ok { + return nil, false + } + parts[1] = claims + // One more cut to ensure the signature is the last part of the token and there are no more + // delimiters. This avoids an issue where malicious input could contain additional delimiters + // causing unecessary overhead parsing tokens. + signature, _, unexpected := strings.Cut(remain, tokenDelimiter) + if unexpected { + return nil, false + } + parts[2] = signature + + return parts, true +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md index 6ad1c22b..b3178e75 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md +++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md @@ -17,7 +17,7 @@ and corresponding updates for existing programs. 
## Parsing and Validation Options -Under the hood, a new `validator` struct takes care of validating the claims. A +Under the hood, a new `Validator` struct takes care of validating the claims. A long awaited feature has been the option to fine-tune the validation of tokens. This is now possible with several `ParserOption` functions that can be appended to most `Parse` functions, such as `ParseWithClaims`. The most important options @@ -68,6 +68,16 @@ type Claims interface { } ``` +Users that previously directly called the `Valid` function on their claims, +e.g., to perform validation independently of parsing/verifying a token, can now +use the `jwt.NewValidator` function to create a `Validator` independently of the +`Parser`. + +```go +var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second)) +v.Validate(myClaims) +``` + ### Supported Claim Types and Removal of `StandardClaims` The two standard claim types supported by this library, `MapClaims` and @@ -145,7 +155,7 @@ stored in base64 encoded form, which was redundant with the information in the type Token struct { Raw string // Raw contains the raw token Method SigningMethod // Method is the signing method used or to be used - Header map[string]interface{} // Header is the first segment of the token in decoded form + Header map[string]any // Header is the first segment of the token in decoded form Claims Claims // Claims is the second segment of the token in decoded form Signature []byte // Signature is the third segment of the token in decoded form Valid bool // Valid specifies if the token is valid @@ -169,7 +179,7 @@ be a drop-in replacement, if you're having troubles migrating, please open an issue. You can replace all occurrences of `github.com/dgrijalva/jwt-go` or -`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v5`, either manually +`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. 
And then you'd typically run: diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md index 964598a3..0bb636f2 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/README.md +++ b/vendor/github.com/golang-jwt/jwt/v5/README.md @@ -10,11 +10,11 @@ implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) -this project adds Go module support, but maintains backwards compatibility with +this project adds Go module support, but maintains backward compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version v5.0.0 introduces major improvements to the validation of tokens, but is not -entirely backwards compatible. +entirely backward compatible. > After the original author of the library suggested migrating the maintenance > of `jwt-go`, a dedicated team of open source maintainers decided to clone the @@ -24,7 +24,7 @@ entirely backwards compatible. **SECURITY NOTICE:** Some older versions of Go have a security issue in the -crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue +crypto/elliptic. The recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. @@ -32,7 +32,7 @@ detail. what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key -types match the expected alg, but you should take the extra step to verify it in +types to match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. ### Supported Go versions @@ -41,7 +41,7 @@ Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). 
So we will support a major version of Go until there are two newer major releases. We no longer support building jwt-go with unsupported Go versions, as these contain security -vulnerabilities which will not be fixed. +vulnerabilities that will not be fixed. ## What the heck is a JWT? @@ -117,7 +117,7 @@ notable differences: This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few -backwards-incompatible changes outside of major version updates (and only with +backward-incompatible changes outside of major version updates (and only with good reason). This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull @@ -125,8 +125,8 @@ requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). -**BREAKING CHANGES:*** A full list of breaking changes is available in -`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating +**BREAKING CHANGES:** A full list of breaking changes is available in +`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating your code. ## Extensions diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md index b08402c3..2740597f 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md +++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md @@ -2,11 +2,11 @@ ## Supported Versions -As of February 2022 (and until this document is updated), the latest version `v4` is supported. +As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`. 
## Reporting a Vulnerability -If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s). +If you think you found a vulnerability, and even if you are not sure, please report it a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try be explicit, describe steps to reproduce the security issue with code example(s). You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go index 4ccae2a8..06cd94d2 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go @@ -55,14 +55,14 @@ func (m *SigningMethodECDSA) Alg() string { // Verify implements token verification for the SigningMethod. // For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error { +func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key any) error { // Get the key var ecdsaKey *ecdsa.PublicKey switch k := key.(type) { case *ecdsa.PublicKey: ecdsaKey = k default: - return ErrInvalidKeyType + return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType) } if len(sig) != 2*m.KeySize { @@ -89,14 +89,14 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf // Sign implements token signing for the SigningMethod. 
// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) { +func (m *SigningMethodECDSA) Sign(signingString string, key any) ([]byte, error) { // Get the key var ecdsaKey *ecdsa.PrivateKey switch k := key.(type) { case *ecdsa.PrivateKey: ecdsaKey = k default: - return nil, ErrInvalidKeyType + return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go index 5700636d..44a3b7a1 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go @@ -23,7 +23,7 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { } // Parse the key - var parsedKey interface{} + var parsedKey any if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { return nil, err @@ -50,7 +50,7 @@ func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { } // Parse the key - var parsedKey interface{} + var parsedKey any if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { if cert, err := x509.ParseCertificate(block.Bytes); err == nil { parsedKey = cert.PublicKey diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go index 3db00e4a..4159e57b 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go @@ -1,11 +1,10 @@ package jwt import ( - "errors" - "crypto" "crypto/ed25519" "crypto/rand" + "errors" ) var ( @@ -34,12 +33,12 @@ func (m *SigningMethodEd25519) Alg() string { // Verify implements token verification for the SigningMethod. 
// For this verify method, key must be an ed25519.PublicKey -func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error { +func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key any) error { var ed25519Key ed25519.PublicKey var ok bool if ed25519Key, ok = key.(ed25519.PublicKey); !ok { - return ErrInvalidKeyType + return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType) } if len(ed25519Key) != ed25519.PublicKeySize { @@ -56,12 +55,12 @@ func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key inte // Sign implements token signing for the SigningMethod. // For this signing method, key must be an ed25519.PrivateKey -func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) { +func (m *SigningMethodEd25519) Sign(signingString string, key any) ([]byte, error) { var ed25519Key crypto.Signer var ok bool if ed25519Key, ok = key.(crypto.Signer); !ok { - return nil, ErrInvalidKeyType + return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType) } if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go index cdb5e68e..6f46e886 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go @@ -24,7 +24,7 @@ func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { } // Parse the key - var parsedKey interface{} + var parsedKey any if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { return nil, err } @@ -49,7 +49,7 @@ func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { } // Parse the key - var parsedKey interface{} + var parsedKey any if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { return nil, err } diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go 
b/vendor/github.com/golang-jwt/jwt/v5/errors.go index 23bb616d..14e00751 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/errors.go +++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go @@ -2,6 +2,7 @@ package jwt import ( "errors" + "fmt" "strings" ) @@ -47,3 +48,42 @@ func joinErrors(errs ...error) error { errs: errs, } } + +// Unwrap implements the multiple error unwrapping for this error type, which is +// possible in Go 1.20. +func (je joinedError) Unwrap() []error { + return je.errs +} + +// newError creates a new error message with a detailed error message. The +// message will be prefixed with the contents of the supplied error type. +// Additionally, more errors, that provide more context can be supplied which +// will be appended to the message. This makes use of Go 1.20's possibility to +// include more than one %w formatting directive in [fmt.Errorf]. +// +// For example, +// +// newError("no keyfunc was provided", ErrTokenUnverifiable) +// +// will produce the error string +// +// "token is unverifiable: no keyfunc was provided" +func newError(message string, err error, more ...error) error { + var format string + var args []any + if message != "" { + format = "%w: %s" + args = []any{err, message} + } else { + format = "%w" + args = []any{err} + } + + for _, e := range more { + format += ": %w" + args = append(args, e) + } + + err = fmt.Errorf(format, args...) + return err +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go deleted file mode 100644 index a893d355..00000000 --- a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package jwt - -import ( - "fmt" -) - -// Unwrap implements the multiple error unwrapping for this error type, which is -// possible in Go 1.20. -func (je joinedError) Unwrap() []error { - return je.errs -} - -// newError creates a new error message with a detailed error message. 
The -// message will be prefixed with the contents of the supplied error type. -// Additionally, more errors, that provide more context can be supplied which -// will be appended to the message. This makes use of Go 1.20's possibility to -// include more than one %w formatting directive in [fmt.Errorf]. -// -// For example, -// -// newError("no keyfunc was provided", ErrTokenUnverifiable) -// -// will produce the error string -// -// "token is unverifiable: no keyfunc was provided" -func newError(message string, err error, more ...error) error { - var format string - var args []any - if message != "" { - format = "%w: %s" - args = []any{err, message} - } else { - format = "%w" - args = []any{err} - } - - for _, e := range more { - format += ": %w" - args = append(args, e) - } - - err = fmt.Errorf(format, args...) - return err -} diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go deleted file mode 100644 index 3afb04e6..00000000 --- a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go +++ /dev/null @@ -1,78 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package jwt - -import ( - "errors" - "fmt" -) - -// Is implements checking for multiple errors using [errors.Is], since multiple -// error unwrapping is not possible in versions less than Go 1.20. -func (je joinedError) Is(err error) bool { - for _, e := range je.errs { - if errors.Is(e, err) { - return true - } - } - - return false -} - -// wrappedErrors is a workaround for wrapping multiple errors in environments -// where Go 1.20 is not available. It basically uses the already implemented -// functionatlity of joinedError to handle multiple errors with supplies a -// custom error message that is identical to the one we produce in Go 1.20 using -// multiple %w directives. 
-type wrappedErrors struct { - msg string - joinedError -} - -// Error returns the stored error string -func (we wrappedErrors) Error() string { - return we.msg -} - -// newError creates a new error message with a detailed error message. The -// message will be prefixed with the contents of the supplied error type. -// Additionally, more errors, that provide more context can be supplied which -// will be appended to the message. Since we cannot use of Go 1.20's possibility -// to include more than one %w formatting directive in [fmt.Errorf], we have to -// emulate that. -// -// For example, -// -// newError("no keyfunc was provided", ErrTokenUnverifiable) -// -// will produce the error string -// -// "token is unverifiable: no keyfunc was provided" -func newError(message string, err error, more ...error) error { - // We cannot wrap multiple errors here with %w, so we have to be a little - // bit creative. Basically, we are using %s instead of %w to produce the - // same error message and then throw the result into a custom error struct. - var format string - var args []any - if message != "" { - format = "%s: %s" - args = []any{err, message} - } else { - format = "%s" - args = []any{err} - } - errs := []error{err} - - for _, e := range more { - format += ": %s" - args = append(args, e) - errs = append(errs, e) - } - - err = &wrappedErrors{ - msg: fmt.Sprintf(format, args...), - joinedError: joinedError{errs: errs}, - } - return err -} diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go index 91b688ba..1bef138c 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go +++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go @@ -55,11 +55,11 @@ func (m *SigningMethodHMAC) Alg() string { // about this, and why we intentionally are not supporting string as a key can // be found on our usage guide // https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types. 
-func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error { +func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key any) error { // Verify the key is the right type keyBytes, ok := key.([]byte) if !ok { - return ErrInvalidKeyType + return newError("HMAC verify expects []byte", ErrInvalidKeyType) } // Can we use the specified hashing method? @@ -88,7 +88,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa // cryptographically random source, e.g. crypto/rand. Additional information // about this, and why we intentionally are not supporting string as a key can // be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/. -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) { +func (m *SigningMethodHMAC) Sign(signingString string, key any) ([]byte, error) { if keyBytes, ok := key.([]byte); ok { if !m.Hash.Available() { return nil, ErrHashUnavailable @@ -100,5 +100,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, return hasher.Sum(nil), nil } - return nil, ErrInvalidKeyType + return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) } diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go index b2b51a1f..3b920527 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go +++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go @@ -5,9 +5,9 @@ import ( "fmt" ) -// MapClaims is a claims type that uses the map[string]interface{} for JSON +// MapClaims is a claims type that uses the map[string]any for JSON // decoding. This is the default claims type if you don't supply one -type MapClaims map[string]interface{} +type MapClaims map[string]any // GetExpirationTime implements the Claims interface. 
func (m MapClaims) GetExpirationTime() (*NumericDate, error) { @@ -73,7 +73,7 @@ func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) { cs = append(cs, v) case []string: cs = v - case []interface{}: + case []any: for _, a := range v { vs, ok := a.(string) if !ok { @@ -92,7 +92,7 @@ func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) { func (m MapClaims) parseString(key string) (string, error) { var ( ok bool - raw interface{} + raw any iss string ) raw, ok = m[key] diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go index c93daa58..624ad55e 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/none.go +++ b/vendor/github.com/golang-jwt/jwt/v5/none.go @@ -25,14 +25,14 @@ func (m *signingMethodNone) Alg() string { } // Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) { +func (m *signingMethodNone) Verify(signingString string, sig []byte, key any) (err error) { // Key must be UnsafeAllowNoneSignatureType to prevent accidentally // accepting 'none' signing method if _, ok := key.(unsafeNoneMagicConstant); !ok { return NoneSignatureTypeDisallowedError } // If signing method is none, signature must be an empty string - if string(sig) != "" { + if len(sig) != 0 { return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable) } @@ -41,7 +41,7 @@ func (m *signingMethodNone) Verify(signingString string, sig []byte, key interfa } // Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) { +func (m *signingMethodNone) Sign(signingString string, key any) ([]byte, error) { if _, ok := key.(unsafeNoneMagicConstant); ok { return []byte{}, nil } diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go 
b/vendor/github.com/golang-jwt/jwt/v5/parser.go index f4386fba..054c7eb6 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go @@ -8,6 +8,8 @@ import ( "strings" ) +const tokenDelimiter = "." + type Parser struct { // If populated, only these methods will be considered valid. validMethods []string @@ -18,7 +20,7 @@ type Parser struct { // Skip claims validation during token parsing. skipClaimsValidation bool - validator *validator + validator *Validator decodeStrict bool @@ -28,7 +30,7 @@ type Parser struct { // NewParser creates a new Parser with the specified options func NewParser(options ...ParserOption) *Parser { p := &Parser{ - validator: &validator{}, + validator: &Validator{}, } // Loop through our parsing options and apply them @@ -74,24 +76,40 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } } - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. short circuiting validation - return token, newError("no keyfunc was provided", ErrTokenUnverifiable) - } - if key, err = keyFunc(token); err != nil { - return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) - } - // Decode signature token.Signature, err = p.DecodeSegment(parts[2]) if err != nil { return token, newError("could not base64 decode signature", ErrTokenMalformed, err) } + text := strings.Join(parts[0:2], ".") - // Perform signature validation - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + // Lookup key(s) + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, newError("no keyfunc was provided", ErrTokenUnverifiable) + } + + got, err := keyFunc(token) + if err != nil { + return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) + } + + switch have := got.(type) { + case VerificationKeySet: + if len(have.Keys) == 0 { + return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable) + } + // Iterate through keys and verify signature, skipping the rest when a match is found. + // Return the last error if no match is found. + for _, key := range have.Keys { + if err = token.Method.Verify(text, token.Signature, key); err == nil { + break + } + } + default: + err = token.Method.Verify(text, token.Signature, have) + } + if err != nil { return token, newError("", ErrTokenSignatureInvalid, err) } @@ -99,7 +117,7 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf if !p.skipClaimsValidation { // Make sure we have at least a default validator if p.validator == nil { - p.validator = newValidator() + p.validator = NewValidator() } if err := p.validator.Validate(claims); err != nil { @@ -117,12 +135,13 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf // // WARNING: Don't use this method unless you know what you're doing. // -// It's only ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from it. +// It's only ever useful in cases where you know the signature is valid (since it has already +// been or will be checked elsewhere in the stack) and you want to extract values from it. 
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed) + var ok bool + parts, ok = splitToken(tokenString) + if !ok { + return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed) } token = &Token{Raw: tokenString} @@ -130,9 +149,6 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke // parse Header var headerBytes []byte if headerBytes, err = p.DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, newError("tokenstring should not contain 'bearer '", ErrTokenMalformed) - } return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err) } if err = json.Unmarshal(headerBytes, &token.Header); err != nil { @@ -140,23 +156,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke } // parse Claims - var claimBytes []byte token.Claims = claims - if claimBytes, err = p.DecodeSegment(parts[1]); err != nil { + claimBytes, err := p.DecodeSegment(parts[1]) + if err != nil { return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err) } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.useJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) + + // If `useJSONNumber` is enabled then we must use *json.Decoder to decode + // the claims. However, this comes with a performance penalty so only use + // it if we must and, otherwise, simple use json.Unmarshal. + if !p.useJSONNumber { + // JSON Unmarshal. Special case for map type to avoid weird pointer behavior. 
+ if c, ok := token.Claims.(MapClaims); ok { + err = json.Unmarshal(claimBytes, &c) + } else { + err = json.Unmarshal(claimBytes, &claims) + } } else { - err = dec.Decode(&claims) + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + dec.UseNumber() + // JSON Decode. Special case for map type to avoid weird pointer behavior. + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } } - // Handle decode error if err != nil { return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err) } @@ -173,6 +199,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke return token, parts, nil } +// splitToken splits a token string into three parts: header, claims, and signature. It will only +// return true if the token contains exactly two delimiters and three parts. In all other cases, it +// will return nil parts and false. +func splitToken(token string) ([]string, bool) { + parts := make([]string, 3) + header, remain, ok := strings.Cut(token, tokenDelimiter) + if !ok { + return nil, false + } + parts[0] = header + claims, remain, ok := strings.Cut(remain, tokenDelimiter) + if !ok { + return nil, false + } + parts[1] = claims + // One more cut to ensure the signature is the last part of the token and there are no more + // delimiters. This avoids an issue where malicious input could contain additional delimiters + // causing unecessary overhead parsing tokens. + signature, _, unexpected := strings.Cut(remain, tokenDelimiter) + if unexpected { + return nil, false + } + parts[2] = signature + + return parts, true +} + // DecodeSegment decodes a JWT specific base64url encoding. This function will // take into account whether the [Parser] is configured with additional options, // such as [WithStrictDecoding] or [WithPaddingAllowed]. 
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go index 1b5af970..43157355 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go +++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go @@ -58,20 +58,45 @@ func WithIssuedAt() ParserOption { } } -// WithAudience configures the validator to require the specified audience in -// the `aud` claim. Validation will fail if the audience is not listed in the -// token or the `aud` claim is missing. +// WithExpirationRequired returns the ParserOption to make exp claim required. +// By default exp claim is optional. +func WithExpirationRequired() ParserOption { + return func(p *Parser) { + p.validator.requireExp = true + } +} + +// WithAudience configures the validator to require any of the specified +// audiences in the `aud` claim. Validation will fail if the audience is not +// listed in the token or the `aud` claim is missing. // // NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is // application-specific. Since this validation API is helping developers in // writing secure application, we decided to REQUIRE the existence of the claim, // if an audience is expected. -func WithAudience(aud string) ParserOption { +func WithAudience(aud ...string) ParserOption { return func(p *Parser) { p.validator.expectedAud = aud } } +// WithAllAudiences configures the validator to require all the specified +// audiences in the `aud` claim. Validation will fail if the specified audiences +// are not listed in the token or the `aud` claim is missing. Duplicates within +// the list are de-duplicated since internally, we use a map to look up the +// audiences. +// +// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is +// application-specific. Since this validation API is helping developers in +// writing secure application, we decided to REQUIRE the existence of the claim, +// if an audience is expected. 
+func WithAllAudiences(aud ...string) ParserOption { + return func(p *Parser) { + p.validator.expectedAud = aud + p.validator.expectAllAud = true + } +} + // WithIssuer configures the validator to require the specified issuer in the // `iss` claim. Validation will fail if a different issuer is specified in the // token or the `iss` claim is missing. diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go index daff0943..98b960a7 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/rsa.go +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go @@ -46,12 +46,12 @@ func (m *SigningMethodRSA) Alg() string { // Verify implements token verification for the SigningMethod // For this signing method, must be an *rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error { +func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key any) error { var rsaKey *rsa.PublicKey var ok bool if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType + return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType) } // Create hasher @@ -67,13 +67,13 @@ func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interfac // Sign implements token signing for the SigningMethod // For this signing method, must be an *rsa.PrivateKey structure. 
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) { +func (m *SigningMethodRSA) Sign(signingString string, key any) ([]byte, error) { var rsaKey *rsa.PrivateKey var ok bool // Validate type of key if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return nil, ErrInvalidKey + return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go index 9599f0a4..f17590cc 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go @@ -1,6 +1,3 @@ -//go:build go1.4 -// +build go1.4 - package jwt import ( @@ -82,13 +79,13 @@ func init() { // Verify implements token verification for the SigningMethod. // For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error { +func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key any) error { var rsaKey *rsa.PublicKey switch k := key.(type) { case *rsa.PublicKey: rsaKey = k default: - return ErrInvalidKey + return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType) } // Create hasher @@ -108,14 +105,14 @@ func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key inter // Sign implements token signing for the SigningMethod. 
// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) { +func (m *SigningMethodRSAPSS) Sign(signingString string, key any) ([]byte, error) { var rsaKey *rsa.PrivateKey switch k := key.(type) { case *rsa.PrivateKey: rsaKey = k default: - return nil, ErrInvalidKeyType + return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go index b3aeebbe..f22c3d06 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go @@ -23,7 +23,7 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return nil, ErrKeyMustBePEMEncoded } - var parsedKey interface{} + var parsedKey any if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { return nil, err @@ -53,7 +53,7 @@ func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.Pr return nil, ErrKeyMustBePEMEncoded } - var parsedKey interface{} + var parsedKey any var blockDecrypted []byte if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { @@ -86,7 +86,7 @@ func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { } // Parse the key - var parsedKey interface{} + var parsedKey any if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { if cert, err := x509.ParseCertificate(block.Bytes); err == nil { parsedKey = cert.PublicKey diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go index 0d73631c..096d0ed4 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go +++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go @@ -12,9 +12,9 @@ var signingMethodLock = 
new(sync.RWMutex) // signature in Sign. The signature is then usually base64 encoded as part of a // JWT. type SigningMethod interface { - Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') + Verify(signingString string, sig []byte, key any) error // Returns nil if signature is valid + Sign(signingString string, key any) ([]byte, error) // Returns signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') } // RegisterSigningMethod registers the "alg" name and a factory function for signing method. diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go index c8ad7c78..3f715588 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/token.go +++ b/vendor/github.com/golang-jwt/jwt/v5/token.go @@ -1,6 +1,7 @@ package jwt import ( + "crypto" "encoding/base64" "encoding/json" ) @@ -9,17 +10,30 @@ import ( // the key for verification. The function receives the parsed, but unverified // Token. This allows you to use properties in the Header of the token (such as // `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) +// +// The returned any may be a single key or a VerificationKeySet containing +// multiple keys. +type Keyfunc func(*Token) (any, error) + +// VerificationKey represents a public or secret key for verifying a token's signature. +type VerificationKey interface { + crypto.PublicKey | []uint8 +} + +// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token. +type VerificationKeySet struct { + Keys []VerificationKey +} // Token represents a JWT Token. Different fields will be used depending on // whether you're creating or parsing/verifying a token. 
type Token struct { - Raw string // Raw contains the raw token. Populated when you [Parse] a token - Method SigningMethod // Method is the signing method used or to be used - Header map[string]interface{} // Header is the first segment of the token in decoded form - Claims Claims // Claims is the second segment of the token in decoded form - Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token - Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token + Raw string // Raw contains the raw token. Populated when you [Parse] a token + Method SigningMethod // Method is the signing method used or to be used + Header map[string]any // Header is the first segment of the token in decoded form + Claims Claims // Claims is the second segment of the token in decoded form + Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token + Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token } // New creates a new [Token] with the specified signing method and an empty map @@ -32,7 +46,7 @@ func New(method SigningMethod, opts ...TokenOption) *Token { // claims. Additional options can be specified, but are currently unused. func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token { return &Token{ - Header: map[string]interface{}{ + Header: map[string]any{ "typ": "JWT", "alg": method.Alg(), }, @@ -46,7 +60,7 @@ func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *To // https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types // for an overview of the different signing methods and their respective key // types. 
-func (t *Token) SignedString(key interface{}) (string, error) { +func (t *Token) SignedString(key any) (string, error) { sstr, err := t.SigningString() if err != nil { return "", err @@ -61,7 +75,7 @@ func (t *Token) SignedString(key interface{}) (string, error) { } // SigningString generates the signing string. This is the most expensive part -// of the whole deal. Unless you need this for something special, just go +// of the whole deal. Unless you need this for something special, just go // straight for the SignedString. func (t *Token) SigningString() (string, error) { h, err := json.Marshal(t.Header) diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go index b82b3886..a3e0ef12 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/types.go +++ b/vendor/github.com/golang-jwt/jwt/v5/types.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "math" - "reflect" "strconv" "time" ) @@ -104,7 +103,7 @@ func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { type ClaimStrings []string func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { - var value interface{} + var value any if err = json.Unmarshal(data, &value); err != nil { return err @@ -117,18 +116,18 @@ func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { aud = append(aud, v) case []string: aud = ClaimStrings(v) - case []interface{}: + case []any: for _, vv := range v { vs, ok := vv.(string) if !ok { - return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + return ErrInvalidType } aud = append(aud, vs) } case nil: return nil default: - return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + return ErrInvalidType } *s = aud diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go index 38504389..92b5c057 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/validator.go +++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go @@ -1,8 +1,8 @@ package jwt import ( - "crypto/subtle" 
"fmt" + "slices" "time" ) @@ -28,13 +28,12 @@ type ClaimsValidator interface { Validate() error } -// validator is the core of the new Validation API. It is automatically used by +// Validator is the core of the new Validation API. It is automatically used by // a [Parser] during parsing and can be modified with various parser options. // -// Note: This struct is intentionally not exported (yet) as we want to -// internally finalize its API. In the future, we might make it publicly -// available. -type validator struct { +// The [NewValidator] function should be used to create an instance of this +// struct. +type Validator struct { // leeway is an optional leeway that can be provided to account for clock skew. leeway time.Duration @@ -42,6 +41,9 @@ type validator struct { // validation. If unspecified, this defaults to time.Now. timeFunc func() time.Time + // requireExp specifies whether the exp claim is required + requireExp bool + // verifyIat specifies whether the iat (Issued At) claim will be verified. // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this // only specifies the age of the token, but no validation check is @@ -50,8 +52,12 @@ type validator struct { verifyIat bool // expectedAud contains the audience this token expects. Supplying an empty - // string will disable aud checking. - expectedAud string + // slice will disable aud checking. + expectedAud []string + + // expectAllAud specifies whether all expected audiences must be present in + // the token. If false, only one of the expected audiences must be present. + expectAllAud bool // expectedIss contains the issuer this token expects. Supplying an empty // string will disable iss checking. @@ -62,19 +68,31 @@ type validator struct { expectedSub string } -// newValidator can be used to create a stand-alone validator with the supplied +// NewValidator can be used to create a stand-alone validator with the supplied // options. 
This validator can then be used to validate already parsed claims. -func newValidator(opts ...ParserOption) *validator { +// +// Note: Under normal circumstances, explicitly creating a validator is not +// needed and can potentially be dangerous; instead functions of the [Parser] +// class should be used. +// +// The [Validator] is only checking the *validity* of the claims, such as its +// expiration time, but it does NOT perform *signature verification* of the +// token. +func NewValidator(opts ...ParserOption) *Validator { p := NewParser(opts...) return p.validator } // Validate validates the given claims. It will also perform any custom // validation if claims implements the [ClaimsValidator] interface. -func (v *validator) Validate(claims Claims) error { +// +// Note: It will NOT perform any *signature verification* on the token that +// contains the claims and expects that the [Claim] was already successfully +// verified. +func (v *Validator) Validate(claims Claims) error { var ( now time.Time - errs []error = make([]error, 0, 6) + errs = make([]error, 0, 6) err error ) @@ -86,8 +104,9 @@ func (v *validator) Validate(claims Claims) error { } // We always need to check the expiration time, but usage of the claim - // itself is OPTIONAL. - if err = v.verifyExpiresAt(claims, now, false); err != nil { + // itself is OPTIONAL by default. requireExp overrides this behavior + // and makes the exp claim mandatory. 
+ if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil { errs = append(errs, err) } @@ -105,8 +124,8 @@ func (v *validator) Validate(claims Claims) error { } // If we have an expected audience, we also require the audience claim - if v.expectedAud != "" { - if err = v.verifyAudience(claims, v.expectedAud, true); err != nil { + if len(v.expectedAud) > 0 { + if err = v.verifyAudience(claims, v.expectedAud, v.expectAllAud); err != nil { errs = append(errs, err) } } @@ -149,7 +168,7 @@ func (v *validator) Validate(claims Claims) error { // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { exp, err := claims.GetExpirationTime() if err != nil { return err @@ -170,7 +189,7 @@ func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { iat, err := claims.GetIssuedAt() if err != nil { return err @@ -191,7 +210,7 @@ func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. 
-func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { nbf, err := claims.GetNotBefore() if err != nil { return err @@ -211,33 +230,39 @@ func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyAudience(claims Claims, cmp string, required bool) error { +func (v *Validator) verifyAudience(claims Claims, cmp []string, expectAllAud bool) error { aud, err := claims.GetAudience() if err != nil { return err } - if len(aud) == 0 { + // Check that aud exists and is not empty. We only require the aud claim + // if we expect at least one audience to be present. + if len(aud) == 0 || len(aud) == 1 && aud[0] == "" { + required := len(v.expectedAud) > 0 return errorIfRequired(required, "aud") } - // use a var here to keep constant time compare when looping over a number of claims - result := false - - var stringClaims string - for _, a := range aud { - if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { - result = true + if !expectAllAud { + for _, a := range aud { + // If we only expect one match, we can stop early if we find a match + if slices.Contains(cmp, a) { + return nil + } } - stringClaims = stringClaims + a + + return ErrTokenInvalidAudience } - // case where "" is sent in one or many aud claims - if stringClaims == "" { - return errorIfRequired(required, "aud") + // Note that we are looping cmp here to ensure that all expected audiences + // are present in the aud claim. + for _, a := range cmp { + if !slices.Contains(aud, a) { + return ErrTokenInvalidAudience + } } - return errorIfFalse(result, ErrTokenInvalidAudience) + return nil } // verifyIssuer compares the iss claim in claims against cmp. 
@@ -247,7 +272,7 @@ func (v *validator) verifyAudience(claims Claims, cmp string, required bool) err // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error { +func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error { iss, err := claims.GetIssuer() if err != nil { return err @@ -267,7 +292,7 @@ func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifySubject(claims Claims, cmp string, required bool) error { +func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error { sub, err := claims.GetSubject() if err != nil { return err diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_send.go b/vendor/github.com/kavenegar/kavenegar-go/message_send.go index 0ac75a31..02b0b640 100644 --- a/vendor/github.com/kavenegar/kavenegar-go/message_send.go +++ b/vendor/github.com/kavenegar/kavenegar-go/message_send.go @@ -7,6 +7,7 @@ import ( //Send ... 
func (m *MessageService) Send(sender string, receptor []string, message string, params *MessageSendParam) ([]Message, error) { v := url.Values{} + v.Set("sender", sender) v.Set("receptor", ToString(receptor)) v.Set("message", message) if params != nil { diff --git a/vendor/github.com/labstack/echo-jwt/v4/CHANGELOG.md b/vendor/github.com/labstack/echo-jwt/v4/CHANGELOG.md index 16f10543..54efdb86 100644 --- a/vendor/github.com/labstack/echo-jwt/v4/CHANGELOG.md +++ b/vendor/github.com/labstack/echo-jwt/v4/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## v4.4.0 - 2025-11-20 + +**Enhancements** + +* Revert 'Return HTTP status 400 if missing JWT' PR to return 401 [#39](https://github.com/labstack/echo-jwt/pull/39) +* Updated dependencies [#39](https://github.com/labstack/echo-jwt/pull/39) +* Return ErrJWTMissing, ErrJWTInvalid clones so error code could be changed more easily + + +## v4.3.1 - 2025-03-22 + +**Security** + +* update JWT dependencies (https://github.com/advisories/GHSA-mh63-6h87-95cp) by @aldas in [#31](https://github.com/labstack/echo-jwt/pull/31) + + +## v4.3.0 - 2024-12-04 + +**Enhancements** + +* Update Echo dependency to v4.13.0 by @aldas in [#28](https://github.com/labstack/echo-jwt/pull/28) + + +## v4.2.1 - 2024-12-04 + +**Enhancements** + +* Return HTTP status 400 if missing JWT by @kitloong in [#13](https://github.com/labstack/echo-jwt/pull/13) +* Update dependencies and CI flow by @aldas in [#21](https://github.com/labstack/echo-jwt/pull/21), [#24](https://github.com/labstack/echo-jwt/pull/24), [#27](https://github.com/labstack/echo-jwt/pull/27) +* Improve readme formatting by @aldas in [#25](https://github.com/labstack/echo-jwt/pull/25) + + ## v4.2.0 - 2023-01-26 **Breaking change:** [JWT](github.com/golang-jwt/jwt) has been upgraded to `v5`. Check/test all your code involved with JWT tokens/claims. 
Search for `github.com/golang-jwt/jwt/v4` diff --git a/vendor/github.com/labstack/echo-jwt/v4/Makefile b/vendor/github.com/labstack/echo-jwt/v4/Makefile index 5173a5a5..dad1d894 100644 --- a/vendor/github.com/labstack/echo-jwt/v4/Makefile +++ b/vendor/github.com/labstack/echo-jwt/v4/Makefile @@ -31,6 +31,6 @@ format: ## Format the source code help: ## Display this help screen @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' -goversion ?= "1.18" -test_version: ## Run tests inside Docker with given version (defaults to 1.18 oldest supported). Example: make test_version goversion=1.18 +goversion ?= "1.20" +test_version: ## Run tests inside Docker with given version (defaults to 1.20 oldest supported). Example: make test_version goversion=1.20 @docker run --rm -it -v $(shell pwd):/project golang:$(goversion) /bin/sh -c "cd /project && make race" diff --git a/vendor/github.com/labstack/echo-jwt/v4/README.md b/vendor/github.com/labstack/echo-jwt/v4/README.md index 66fedc9c..9c6cc48a 100644 --- a/vendor/github.com/labstack/echo-jwt/v4/README.md +++ b/vendor/github.com/labstack/echo-jwt/v4/README.md @@ -14,6 +14,9 @@ as JWT implementation. This repository does not use semantic versioning. MAJOR version tracks which Echo version should be used. MINOR version tracks API changes (possibly backwards incompatible) and PATCH version is incremented for fixes. +NB: When `golang-jwt` MAJOR version changes this library will release MINOR version with **breaking change**. Always +add at least one integration test in your project. + For Echo `v4` use `v4.x.y` releases. Minimal needed Echo versions: * `v4.0.0` needs Echo `v4.7.0+` @@ -65,6 +68,36 @@ e.GET("/", func(c echo.Context) error { }) ``` +## IMPORTANT: Integration Testing with JWT Library + +Ensure that your project includes at least one integration test to detect changes in major versions of the `golang-jwt/jwt` library early. 
+This is crucial because type assertions like `token := c.Get("user").(*jwt.Token)` may fail silently if the imported version of the JWT library (e.g., `import "github.com/golang-jwt/jwt/v5"`) differs from the version used internally by dependencies (e.g., echo-jwt may now use `v6`). Such discrepancies can lead to invalid casts, causing your handlers to panic or throw errors. Integration tests help safeguard against these version mismatches. + +```go +func TestIntegrationMiddlewareWithHandler(t *testing.T) { + e := echo.New() + e.Use(echojwt.WithConfig(echojwt.Config{ + SigningKey: []byte("secret"), + })) + + // use handler that gets token from context to fail your CI flow when `golang-jwt/jwt` library version changes + // a) `token, ok := c.Get("user").(*jwt.Token)` + // b) `token := c.Get("user").(*jwt.Token)` + e.GET("/example", exampleHandler) + + req := httptest.NewRequest(http.MethodGet, "/example", nil) + req.Header.Set(echo.HeaderAuthorization, "Bearer ") + res := httptest.NewRecorder() + + e.ServeHTTP(res, req) + + if res.Code != 200 { + t.Failed() + } +} +``` + + ## Full example ```go @@ -114,7 +147,7 @@ curl -v -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiO Output should be ```bash -* Trying 127.0.0.1:8080... +* Trying 127.0.0.1:8080... * Connected to localhost (127.0.0.1) port 8080 (#0) > GET / HTTP/1.1 > Host: localhost:8080 diff --git a/vendor/github.com/labstack/echo-jwt/v4/jwt.go b/vendor/github.com/labstack/echo-jwt/v4/jwt.go index 24f9397f..2b6b48b0 100644 --- a/vendor/github.com/labstack/echo-jwt/v4/jwt.go +++ b/vendor/github.com/labstack/echo-jwt/v4/jwt.go @@ -57,6 +57,7 @@ type Config struct { SigningKeys map[string]interface{} // Signing method used to check the token's signing algorithm. + // SigningMethod is not checked when a user-defined KeyFunc is provided. // Optional. Default value HS256. 
SigningMethod string @@ -146,8 +147,7 @@ func (e *TokenError) Unwrap() error { return e.Err } // JWT returns a JSON Web Token (JWT) auth middleware. // // For valid token, it sets the user in context and calls next handler. -// For invalid token, it returns "401 - Unauthorized" error. -// For missing token, it returns "400 - Bad Request" error. +// For invalid or missing token, middleware returns "401 - Unauthorized" error. // // See: https://jwt.io/introduction func JWT(signingKey interface{}) echo.MiddlewareFunc { @@ -157,8 +157,7 @@ func JWT(signingKey interface{}) echo.MiddlewareFunc { // WithConfig returns a JSON Web Token (JWT) auth middleware or panics if configuration is invalid. // // For valid token, it sets the user in context and calls next handler. -// For invalid token, it returns "401 - Unauthorized" error. -// For missing token, it returns "400 - Bad Request" error. +// For invalid or missing token, middleware returns "401 - Unauthorized" error. // // See: https://jwt.io/introduction func WithConfig(config Config) echo.MiddlewareFunc { @@ -254,11 +253,11 @@ func (config Config) ToMiddleware() (echo.MiddlewareFunc, error) { return tmpErr } - message := "invalid or expired jwt" if lastTokenErr == nil { - message = "missing or malformed jwt" + return ErrJWTMissing.WithInternal(err) } - return echo.NewHTTPError(http.StatusUnauthorized, message).SetInternal(err) + + return ErrJWTInvalid.WithInternal(err) } }, nil } diff --git a/vendor/github.com/labstack/echo/v4/CHANGELOG.md b/vendor/github.com/labstack/echo/v4/CHANGELOG.md index de3857ff..b7fd0e14 100644 --- a/vendor/github.com/labstack/echo/v4/CHANGELOG.md +++ b/vendor/github.com/labstack/echo/v4/CHANGELOG.md @@ -1,5 +1,293 @@ # Changelog +## v4.15.0 - 2026-01-01 + + +**Security** + +NB: **If your application relies on cross-origin or same-site (same subdomain) requests do not blindly push this version to production** + + +The CSRF middleware now supports the 
[**Sec-Fetch-Site**](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Sec-Fetch-Site) header as a modern, defense-in-depth approach to [CSRF +protection](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#fetch-metadata-headers), implementing the OWASP-recommended Fetch Metadata API alongside the traditional token-based mechanism. + +**How it works:** + +Modern browsers automatically send the `Sec-Fetch-Site` header with all requests, indicating the relationship +between the request origin and the target. The middleware uses this to make security decisions: + +- **`same-origin`** or **`none`**: Requests are allowed (exact origin match or direct user navigation) +- **`same-site`**: Falls back to token validation (e.g., subdomain to main domain) +- **`cross-site`**: Blocked by default with 403 error for unsafe methods (POST, PUT, DELETE, PATCH) + +For browsers that don't send this header (older browsers), the middleware seamlessly falls back to +traditional token-based CSRF protection. 
+ +**New Configuration Options:** +- `TrustedOrigins []string`: Allowlist specific origins for cross-site requests (useful for OAuth callbacks, webhooks) +- `AllowSecFetchSiteFunc func(echo.Context) (bool, error)`: Custom logic for same-site/cross-site request validation + +**Example:** + ```go + e.Use(middleware.CSRFWithConfig(middleware.CSRFConfig{ + // Allow OAuth callbacks from trusted provider + TrustedOrigins: []string{"https://oauth-provider.com"}, + + // Custom validation for same-site requests + AllowSecFetchSiteFunc: func(c echo.Context) (bool, error) { + // Your custom authorization logic here + return validateCustomAuth(c), nil + // return true, err // blocks request with error + // return true, nil // allows CSRF request through + // return false, nil // falls back to legacy token logic + }, + })) + ``` +PR: https://github.com/labstack/echo/pull/2858 + +**Type-Safe Generic Parameter Binding** + +* Added generic functions for type-safe parameter extraction and context access by @aldas in https://github.com/labstack/echo/pull/2856 + + Echo now provides generic functions for extracting path, query, and form parameters with automatic type conversion, + eliminating manual string parsing and type assertions. + + **New Functions:** + - Path parameters: `PathParam[T]`, `PathParamOr[T]` + - Query parameters: `QueryParam[T]`, `QueryParamOr[T]`, `QueryParams[T]`, `QueryParamsOr[T]` + - Form values: `FormParam[T]`, `FormParamOr[T]`, `FormParams[T]`, `FormParamsOr[T]` + - Context store: `ContextGet[T]`, `ContextGetOr[T]` + + **Supported Types:** + Primitives (`bool`, `string`, `int`/`uint` variants, `float32`/`float64`), `time.Duration`, `time.Time` + (with custom layouts and Unix timestamp support), and custom types implementing `BindUnmarshaler`, + `TextUnmarshaler`, or `JSONUnmarshaler`. 
+ + **Example:** + ```go + // Before: Manual parsing + idStr := c.Param("id") + id, err := strconv.Atoi(idStr) + + // After: Type-safe with automatic parsing + id, err := echo.PathParam[int](c, "id") + + // With default values + page, err := echo.QueryParamOr[int](c, "page", 1) + limit, err := echo.QueryParamOr[int](c, "limit", 20) + + // Type-safe context access (no more panics from type assertions) + user, err := echo.ContextGet[*User](c, "user") + ``` + +PR: https://github.com/labstack/echo/pull/2856 + + + +**DEPRECATION NOTICE** Timeout Middleware Deprecated - Use ContextTimeout Instead + +The `middleware.Timeout` middleware has been **deprecated** due to fundamental architectural issues that cause +data races. Use `middleware.ContextTimeout` or `middleware.ContextTimeoutWithConfig` instead. + +**Why is this being deprecated?** + +The Timeout middleware manipulates response writers across goroutine boundaries, which causes data races that +cannot be reliably fixed without a complete architectural redesign. The middleware: + +- Swaps the response writer using `http.TimeoutHandler` +- Must be the first middleware in the chain (fragile constraint) +- Can cause races with other middleware (Logger, metrics, custom middleware) +- Has been the source of multiple race condition fixes over the years + +**What should you use instead?** + +The `ContextTimeout` middleware (available since v4.12.0) provides timeout functionality using Go's standard +context mechanism. It is: + +- Race-free by design +- Can be placed anywhere in the middleware chain +- Simpler and more maintainable +- Compatible with all other middleware + +**Migration Guide:** + +```go +// Before (deprecated): +e.Use(middleware.Timeout()) + +// After (recommended): +e.Use(middleware.ContextTimeout(30 * time.Second)) +``` + +**Important Behavioral Differences:** + +1. **Handler cooperation required**: With ContextTimeout, your handlers must check `context.Done()` for cooperative + cancellation. 
The old Timeout middleware would send a 503 response regardless of handler cooperation, but had + data race issues. + +2. **Error handling**: ContextTimeout returns errors through the standard error handling flow. Handlers that receive + `context.DeadlineExceeded` should handle it appropriately: + +```go +e.GET("/long-task", func(c echo.Context) error { + ctx := c.Request().Context() + + // Example: database query with context + result, err := db.QueryContext(ctx, "SELECT * FROM large_table") + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + // Handle timeout + return echo.NewHTTPError(http.StatusServiceUnavailable, "Request timeout") + } + return err + } + + return c.JSON(http.StatusOK, result) +}) +``` + +3. **Background tasks**: For long-running background tasks, use goroutines with context: + +```go +e.GET("/async-task", func(c echo.Context) error { + ctx := c.Request().Context() + + resultCh := make(chan Result, 1) + errCh := make(chan error, 1) + + go func() { + result, err := performLongTask(ctx) + if err != nil { + errCh <- err + return + } + resultCh <- result + }() + + select { + case result := <-resultCh: + return c.JSON(http.StatusOK, result) + case err := <-errCh: + return err + case <-ctx.Done(): + return echo.NewHTTPError(http.StatusServiceUnavailable, "Request timeout") + } +}) +``` + +**Enhancements** + +* Fixes by @aldas in https://github.com/labstack/echo/pull/2852 +* Generic functions by @aldas in https://github.com/labstack/echo/pull/2856 +* CRSF with Sec-Fetch-Site checks by @aldas in https://github.com/labstack/echo/pull/2858 + + +## v4.14.0 - 2025-12-11 + +`middleware.Logger` has been deprecated. For request logging, use `middleware.RequestLogger` or +`middleware.RequestLoggerWithConfig`. + +`middleware.RequestLogger` replaces `middleware.Logger`, offering comparable configuration while relying on the +Go standard library’s new `slog` logger. + +The previous default output format was JSON. 
The new default follows the standard `slog` logger settings. +To continue emitting request logs in JSON, configure `slog` accordingly: +```go +slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, nil))) +e.Use(middleware.RequestLogger()) +``` + + +**Security** + +* Logger middleware json string escaping and deprecation by @aldas in https://github.com/labstack/echo/pull/2849 + + + +**Enhancements** + +* Update deps by @aldas in https://github.com/labstack/echo/pull/2807 +* refactor to use reflect.TypeFor by @cuiweixie in https://github.com/labstack/echo/pull/2812 +* Use Go 1.25 in CI by @aldas in https://github.com/labstack/echo/pull/2810 +* Modernize context.go by replacing interface{} with any by @vishr in https://github.com/labstack/echo/pull/2822 +* Fix typo in SetParamValues comment by @vishr in https://github.com/labstack/echo/pull/2828 +* Fix typo in ContextTimeout middleware comment by @vishr in https://github.com/labstack/echo/pull/2827 +* Improve BasicAuth middleware: use strings.Cut and RFC compliance by @vishr in https://github.com/labstack/echo/pull/2825 +* Fix duplicate plus operator in router backtracking logic by @yuya-morimoto in https://github.com/labstack/echo/pull/2832 +* Replace custom private IP range check with built-in net.IP.IsPrivate by @kumapower17 in https://github.com/labstack/echo/pull/2835 +* Ensure proxy connection is closed in proxyRaw function(#2837) by @kumapower17 in https://github.com/labstack/echo/pull/2838 +* Update deps by @aldas in https://github.com/labstack/echo/pull/2843 +* Update golang.org/x/* deps by @aldas in https://github.com/labstack/echo/pull/2850 + + + +## v4.13.4 - 2025-05-22 + +**Enhancements** + +* chore: fix some typos in comment by @zhuhaicity in https://github.com/labstack/echo/pull/2735 +* CI: test with Go 1.24 by @aldas in https://github.com/labstack/echo/pull/2748 +* Add support for TLS WebSocket proxy by @t-ibayashi-safie in https://github.com/labstack/echo/pull/2762 + +**Security** + +* Update 
dependencies for [GO-2025-3487](https://pkg.go.dev/vuln/GO-2025-3487), [GO-2025-3503](https://pkg.go.dev/vuln/GO-2025-3503) and [GO-2025-3595](https://pkg.go.dev/vuln/GO-2025-3595) in https://github.com/labstack/echo/pull/2780 + + +## v4.13.3 - 2024-12-19 + +**Security** + +* Update golang.org/x/net dependency [GO-2024-3333](https://pkg.go.dev/vuln/GO-2024-3333) in https://github.com/labstack/echo/pull/2722 + + +## v4.13.2 - 2024-12-12 + +**Security** + +* Update dependencies (dependabot reports [GO-2024-3321](https://pkg.go.dev/vuln/GO-2024-3321)) in https://github.com/labstack/echo/pull/2721 + + +## v4.13.1 - 2024-12-11 + +**Fixes** + +* Fix BindBody ignoring `Transfer-Encoding: chunked` requests by @178inaba in https://github.com/labstack/echo/pull/2717 + + + +## v4.13.0 - 2024-12-04 + +**BREAKING CHANGE** JWT Middleware Removed from Core use [labstack/echo-jwt](https://github.com/labstack/echo-jwt) instead + +The JWT middleware has been **removed from Echo core** due to another security vulnerability, [CVE-2024-51744](https://nvd.nist.gov/vuln/detail/CVE-2024-51744). For more details, refer to issue [#2699](https://github.com/labstack/echo/issues/2699). A drop-in replacement is available in the [labstack/echo-jwt](https://github.com/labstack/echo-jwt) repository. + +**Important**: Direct assignments like `token := c.Get("user").(*jwt.Token)` will now cause a panic due to an invalid cast. Update your code accordingly. Replace the current imports from `"github.com/golang-jwt/jwt"` in your handlers to the new middleware version using `"github.com/golang-jwt/jwt/v5"`. + + +Background: + +The version of `golang-jwt/jwt` (v3.2.2) previously used in Echo core has been in an unmaintained state for some time. This is not the first vulnerability affecting this library; earlier issues were addressed in [PR #1946](https://github.com/labstack/echo/pull/1946). 
+JWT middleware was marked as deprecated in Echo core as of [v4.10.0](https://github.com/labstack/echo/releases/tag/v4.10.0) on 2022-12-27. If you did not notice that, consider leveraging tools like [Staticcheck](https://staticcheck.dev/) to catch such deprecations earlier in you dev/CI flow. For bonus points - check out [gosec](https://github.com/securego/gosec). + +We sincerely apologize for any inconvenience caused by this change. While we strive to maintain backward compatibility within Echo core, recurring security issues with third-party dependencies have forced this decision. + +**Enhancements** + +* remove jwt middleware by @stevenwhitehead in https://github.com/labstack/echo/pull/2701 +* optimization: struct alignment by @behnambm in https://github.com/labstack/echo/pull/2636 +* bind: Maintain backwards compatibility for map[string]interface{} binding by @thesaltree in https://github.com/labstack/echo/pull/2656 +* Add Go 1.23 to CI by @aldas in https://github.com/labstack/echo/pull/2675 +* improve `MultipartForm` test by @martinyonatann in https://github.com/labstack/echo/pull/2682 +* `bind` : add support of multipart multi files by @martinyonatann in https://github.com/labstack/echo/pull/2684 +* Add TemplateRenderer struct to ease creating renderers for `html/template` and `text/template` packages. 
by @aldas in https://github.com/labstack/echo/pull/2690 +* Refactor TestBasicAuth to utilize table-driven test format by @ErikOlson in https://github.com/labstack/echo/pull/2688 +* Remove broken header by @aldas in https://github.com/labstack/echo/pull/2705 +* fix(bind body): content-length can be -1 by @phamvinhdat in https://github.com/labstack/echo/pull/2710 +* CORS middleware should compile allowOrigin regexp at creation by @aldas in https://github.com/labstack/echo/pull/2709 +* Shorten Github issue template and add test example by @aldas in https://github.com/labstack/echo/pull/2711 + + ## v4.12.0 - 2024-04-15 **Security** diff --git a/vendor/github.com/labstack/echo/v4/CLAUDE.md b/vendor/github.com/labstack/echo/v4/CLAUDE.md new file mode 100644 index 00000000..decbf079 --- /dev/null +++ b/vendor/github.com/labstack/echo/v4/CLAUDE.md @@ -0,0 +1,99 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## About This Project + +Echo is a high performance, minimalist Go web framework. This is the main repository for Echo v4, which is available as a Go module at `github.com/labstack/echo/v4`. + +## Development Commands + +The project uses a Makefile for common development tasks: + +- `make check` - Run linting, vetting, and race condition tests (default target) +- `make init` - Install required linting tools (golint, staticcheck) +- `make lint` - Run staticcheck and golint +- `make vet` - Run go vet +- `make test` - Run short tests +- `make race` - Run tests with race detector +- `make benchmark` - Run benchmarks + +Example commands for development: +```bash +# Setup development environment +make init + +# Run all checks (lint, vet, race) +make check + +# Run specific tests +go test ./middleware/... +go test -race ./... 
+ +# Run benchmarks +make benchmark +``` + +## Code Architecture + +### Core Components + +**Echo Instance (`echo.go`)** +- The `Echo` struct is the top-level framework instance +- Contains router, middleware stacks, and server configuration +- Not goroutine-safe for mutations after server start + +**Context (`context.go`)** +- The `Context` interface represents HTTP request/response context +- Provides methods for request/response handling, path parameters, data binding +- Core abstraction for request processing + +**Router (`router.go`)** +- Radix tree-based HTTP router with smart route prioritization +- Supports static routes, parameterized routes (`/users/:id`), and wildcard routes (`/static/*`) +- Each HTTP method has its own routing tree + +**Middleware (`middleware/`)** +- Extensive middleware system with 50+ built-in middlewares +- Middleware can be applied at Echo, Group, or individual route level +- Common middleware: Logger, Recover, CORS, JWT, Rate Limiting, etc. + +### Key Patterns + +**Middleware Chain** +- Pre-middleware runs before routing +- Regular middleware runs after routing but before handlers +- Middleware functions have signature `func(next echo.HandlerFunc) echo.HandlerFunc` + +**Route Groups** +- Routes can be grouped with common prefixes and middleware +- Groups support nested sub-groups +- Defined in `group.go` + +**Data Binding** +- Automatic binding of request data (JSON, XML, form) to Go structs +- Implemented in `binder.go` with support for custom binders + +**Error Handling** +- Centralized error handling via `HTTPErrorHandler` +- Automatic panic recovery with stack traces + +## File Organization + +- Root directory: Core Echo functionality (echo.go, context.go, router.go, etc.) 
+- `middleware/`: All built-in middleware implementations +- `_test/`: Test fixtures and utilities +- `_fixture/`: Test data files + +## Code Style + +- Go code uses tabs for indentation (per .editorconfig) +- Follows standard Go conventions and formatting +- Uses gofmt, golint, and staticcheck for code quality + +## Testing + +- Standard Go testing with `testing` package +- Tests include unit tests, integration tests, and benchmarks +- Race condition testing is required (`make race`) +- Test files follow `*_test.go` naming convention \ No newline at end of file diff --git a/vendor/github.com/labstack/echo/v4/Makefile b/vendor/github.com/labstack/echo/v4/Makefile index f9e5afb0..cbd78f1b 100644 --- a/vendor/github.com/labstack/echo/v4/Makefile +++ b/vendor/github.com/labstack/echo/v4/Makefile @@ -31,6 +31,7 @@ benchmark: ## Run benchmarks help: ## Display this help screen @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' -goversion ?= "1.19" -test_version: ## Run tests inside Docker with given version (defaults to 1.19 oldest supported). Example: make test_version goversion=1.19 - @docker run --rm -it -v $(shell pwd):/project golang:$(goversion) /bin/sh -c "cd /project && make init check" +goversion ?= "1.22" +docker_user ?= "1000" +test_version: ## Run tests inside Docker with given version (defaults to 1.22 oldest supported). 
Example: make test_version goversion=1.22 + @docker run --rm -it --user $(docker_user) -e HOME=/tmp -e GOCACHE=/tmp/go-cache -v $(shell pwd):/project golang:$(goversion) /bin/sh -c "mkdir -p /tmp/go-cache /tmp/.cache && cd /project && make init check" diff --git a/vendor/github.com/labstack/echo/v4/README.md b/vendor/github.com/labstack/echo/v4/README.md index 351ba3c5..5e52d1d4 100644 --- a/vendor/github.com/labstack/echo/v4/README.md +++ b/vendor/github.com/labstack/echo/v4/README.md @@ -1,5 +1,3 @@ - - [![Sourcegraph](https://sourcegraph.com/github.com/labstack/echo/-/badge.svg?style=flat-square)](https://sourcegraph.com/github.com/labstack/echo?badge) [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/labstack/echo/v4) [![Go Report Card](https://goreportcard.com/badge/github.com/labstack/echo?style=flat-square)](https://goreportcard.com/report/github.com/labstack/echo) @@ -48,17 +46,6 @@ Help and questions: [Github Discussions](https://github.com/labstack/echo/discus Click [here](https://github.com/sponsors/labstack) for more information on sponsorship. -## Benchmarks - -Date: 2020/11/11
-Source: https://github.com/vishr/web-framework-benchmark
-Lower is better! - - - - -The benchmarks above were run on an Intel(R) Core(TM) i7-6820HQ CPU @ 2.70GHz - ## [Guide](https://echo.labstack.com/guide) ### Installation @@ -77,6 +64,7 @@ package main import ( "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" + "log/slog" "net/http" ) @@ -85,14 +73,16 @@ func main() { e := echo.New() // Middleware - e.Use(middleware.Logger()) - e.Use(middleware.Recover()) + e.Use(middleware.RequestLogger()) // use the default RequestLogger middleware with slog logger + e.Use(middleware.Recover()) // recover panics as errors for proper error handling // Routes e.GET("/", hello) // Start server - e.Logger.Fatal(e.Start(":1323")) + if err := e.Start(":8080"); err != nil && !errors.Is(err, http.ErrServerClosed) { + slog.Error("failed to start server", "error", err) + } } // Handler diff --git a/vendor/github.com/labstack/echo/v4/bind.go b/vendor/github.com/labstack/echo/v4/bind.go index 507def3e..1d4fe6f0 100644 --- a/vendor/github.com/labstack/echo/v4/bind.go +++ b/vendor/github.com/labstack/echo/v4/bind.go @@ -8,10 +8,12 @@ import ( "encoding/xml" "errors" "fmt" + "mime/multipart" "net/http" "reflect" "strconv" "strings" + "time" ) // Binder is the interface that wraps the Bind method. @@ -38,6 +40,13 @@ type bindMultipleUnmarshaler interface { } // BindPathParams binds path params to bindable object +// +// Time format support: time.Time fields can use `format` tags to specify custom parsing layouts. 
+// Example: `param:"created" format:"2006-01-02T15:04"` for datetime-local format +// Example: `param:"date" format:"2006-01-02"` for date format +// Uses Go's standard time format reference time: Mon Jan 2 15:04:05 MST 2006 +// Works with form data, query parameters, and path parameters (not JSON body) +// Falls back to default time.Time parsing if no format tag is specified func (b *DefaultBinder) BindPathParams(c Context, i interface{}) error { names := c.ParamNames() values := c.ParamValues() @@ -45,7 +54,7 @@ func (b *DefaultBinder) BindPathParams(c Context, i interface{}) error { for i, name := range names { params[name] = []string{values[i]} } - if err := b.bindData(i, params, "param"); err != nil { + if err := b.bindData(i, params, "param", nil); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } return nil @@ -53,7 +62,7 @@ func (b *DefaultBinder) BindPathParams(c Context, i interface{}) error { // BindQueryParams binds query params to bindable object func (b *DefaultBinder) BindQueryParams(c Context, i interface{}) error { - if err := b.bindData(i, c.QueryParams(), "query"); err != nil { + if err := b.bindData(i, c.QueryParams(), "query", nil); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } return nil @@ -70,9 +79,12 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { return } - ctype := req.Header.Get(HeaderContentType) - switch { - case strings.HasPrefix(ctype, MIMEApplicationJSON): + // mediatype is found like `mime.ParseMediaType()` does it + base, _, _ := strings.Cut(req.Header.Get(HeaderContentType), ";") + mediatype := strings.TrimSpace(base) + + switch mediatype { + case MIMEApplicationJSON: if err = c.Echo().JSONSerializer.Deserialize(c, i); err != nil { switch err.(type) { case *HTTPError: @@ -81,7 +93,7 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { return NewHTTPError(http.StatusBadRequest, 
err.Error()).SetInternal(err) } } - case strings.HasPrefix(ctype, MIMEApplicationXML), strings.HasPrefix(ctype, MIMETextXML): + case MIMEApplicationXML, MIMETextXML: if err = xml.NewDecoder(req.Body).Decode(i); err != nil { if ute, ok := err.(*xml.UnsupportedTypeError); ok { return NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unsupported type error: type=%v, error=%v", ute.Type, ute.Error())).SetInternal(err) @@ -90,12 +102,20 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { } return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } - case strings.HasPrefix(ctype, MIMEApplicationForm), strings.HasPrefix(ctype, MIMEMultipartForm): + case MIMEApplicationForm: params, err := c.FormParams() if err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } - if err = b.bindData(i, params, "form"); err != nil { + if err = b.bindData(i, params, "form", nil); err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) + } + case MIMEMultipartForm: + params, err := c.MultipartForm() + if err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) + } + if err = b.bindData(i, params.Value, "form", params.File); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } default: @@ -106,7 +126,7 @@ func (b *DefaultBinder) BindBody(c Context, i interface{}) (err error) { // BindHeaders binds HTTP headers to a bindable object func (b *DefaultBinder) BindHeaders(c Context, i interface{}) error { - if err := b.bindData(i, c.Request().Header, "header"); err != nil { + if err := b.bindData(i, c.Request().Header, "header", nil); err != nil { return NewHTTPError(http.StatusBadRequest, err.Error()).SetInternal(err) } return nil @@ -132,10 +152,11 @@ func (b *DefaultBinder) Bind(i interface{}, c Context) (err error) { } // bindData will bind data ONLY fields in destination struct that have EXPLICIT tag -func (b *DefaultBinder) 
bindData(destination interface{}, data map[string][]string, tag string) error { - if destination == nil || len(data) == 0 { +func (b *DefaultBinder) bindData(destination interface{}, data map[string][]string, tag string, dataFiles map[string][]*multipart.FileHeader) error { + if destination == nil || (len(data) == 0 && len(dataFiles) == 0) { return nil } + hasFiles := len(dataFiles) > 0 typ := reflect.TypeOf(destination).Elem() val := reflect.ValueOf(destination).Elem() @@ -159,6 +180,10 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri for k, v := range data { if isElemString { val.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v[0])) + } else if isElemInterface { + // To maintain backward compatibility, we always bind to the first string value + // and not the slice of strings when dealing with map[string]interface{}{} + val.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v[0])) } else { val.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v)) } @@ -175,7 +200,7 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri return errors.New("binding element must be a struct") } - for i := 0; i < typ.NumField(); i++ { + for i := 0; i < typ.NumField(); i++ { // iterate over all destination fields typeField := typ.Field(i) structField := val.Field(i) if typeField.Anonymous { @@ -194,10 +219,10 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri } if inputFieldName == "" { - // If tag is nil, we inspect if the field is a not BindUnmarshaler struct and try to bind data into it (might contains fields with tags). + // If tag is nil, we inspect if the field is a not BindUnmarshaler struct and try to bind data into it (might contain fields with tags). 
// structs that implement BindUnmarshaler are bound only when they have explicit tag if _, ok := structField.Addr().Interface().(BindUnmarshaler); !ok && structFieldKind == reflect.Struct { - if err := b.bindData(structField.Addr().Interface(), data, tag); err != nil { + if err := b.bindData(structField.Addr().Interface(), data, tag, dataFiles); err != nil { return err } } @@ -205,10 +230,20 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri continue } + if hasFiles { + if ok, err := isFieldMultipartFile(structField.Type()); err != nil { + return err + } else if ok { + if ok := setMultipartFileHeaderTypes(structField, inputFieldName, dataFiles); ok { + continue + } + } + } + inputValue, exists := data[inputFieldName] if !exists { - // Go json.Unmarshal supports case insensitive binding. However the - // url params are bound case sensitive which is inconsistent. To + // Go json.Unmarshal supports case-insensitive binding. However the + // url params are bound case-sensitive which is inconsistent. To // fix this we must check all of the map values in a // case-insensitive search. for k, v := range data { @@ -235,14 +270,15 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri continue } - if ok, err := unmarshalInputToField(typeField.Type.Kind(), inputValue[0], structField); ok { + formatTag := typeField.Tag.Get("format") + if ok, err := unmarshalInputToField(typeField.Type.Kind(), inputValue[0], structField, formatTag); ok { if err != nil { return err } continue } - // we could be dealing with pointer to slice `*[]string` so dereference it. There are wierd OpenAPI generators + // we could be dealing with pointer to slice `*[]string` so dereference it. There are weird OpenAPI generators // that could create struct fields like that. 
if structFieldKind == reflect.Pointer { structFieldKind = structField.Elem().Kind() @@ -271,7 +307,8 @@ func (b *DefaultBinder) bindData(destination interface{}, data map[string][]stri func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error { // But also call it here, in case we're dealing with an array of BindUnmarshalers - if ok, err := unmarshalInputToField(valueKind, val, structField); ok { + // Note: format tag not available in this context, so empty string is passed + if ok, err := unmarshalInputToField(valueKind, val, structField, ""); ok { return err } @@ -328,7 +365,7 @@ func unmarshalInputsToField(valueKind reflect.Kind, values []string, field refle return true, unmarshaler.UnmarshalParams(values) } -func unmarshalInputToField(valueKind reflect.Kind, val string, field reflect.Value) (bool, error) { +func unmarshalInputToField(valueKind reflect.Kind, val string, field reflect.Value, formatTag string) (bool, error) { if valueKind == reflect.Ptr { if field.IsNil() { field.Set(reflect.New(field.Type().Elem())) @@ -337,6 +374,19 @@ func unmarshalInputToField(valueKind reflect.Kind, val string, field reflect.Val } fieldIValue := field.Addr().Interface() + + // Handle time.Time with custom format tag + if formatTag != "" { + if _, isTime := fieldIValue.(*time.Time); isTime { + t, err := time.Parse(formatTag, val) + if err != nil { + return true, err + } + field.Set(reflect.ValueOf(t)) + return true, nil + } + } + switch unmarshaler := fieldIValue.(type) { case BindUnmarshaler: return true, unmarshaler.UnmarshalParam(val) @@ -390,3 +440,50 @@ func setFloatField(value string, bitSize int, field reflect.Value) error { } return err } + +var ( + // NOT supported by bind as you can NOT check easily empty struct being actual file or not + multipartFileHeaderType = reflect.TypeFor[multipart.FileHeader]() + // supported by bind as you can check by nil value if file existed or not + multipartFileHeaderPointerType = 
reflect.TypeFor[*multipart.FileHeader]() + multipartFileHeaderSliceType = reflect.TypeFor[[]multipart.FileHeader]() + multipartFileHeaderPointerSliceType = reflect.TypeFor[[]*multipart.FileHeader]() +) + +func isFieldMultipartFile(field reflect.Type) (bool, error) { + switch field { + case multipartFileHeaderPointerType, + multipartFileHeaderSliceType, + multipartFileHeaderPointerSliceType: + return true, nil + case multipartFileHeaderType: + return true, errors.New("binding to multipart.FileHeader struct is not supported, use pointer to struct") + default: + return false, nil + } +} + +func setMultipartFileHeaderTypes(structField reflect.Value, inputFieldName string, files map[string][]*multipart.FileHeader) bool { + fileHeaders := files[inputFieldName] + if len(fileHeaders) == 0 { + return false + } + + result := true + switch structField.Type() { + case multipartFileHeaderPointerSliceType: + structField.Set(reflect.ValueOf(fileHeaders)) + case multipartFileHeaderSliceType: + headers := make([]multipart.FileHeader, len(fileHeaders)) + for i, fileHeader := range fileHeaders { + headers[i] = *fileHeader + } + structField.Set(reflect.ValueOf(headers)) + case multipartFileHeaderPointerType: + structField.Set(reflect.ValueOf(fileHeaders[0])) + default: + result = false + } + + return result +} diff --git a/vendor/github.com/labstack/echo/v4/binder.go b/vendor/github.com/labstack/echo/v4/binder.go index ebabeaf9..da15ae82 100644 --- a/vendor/github.com/labstack/echo/v4/binder.go +++ b/vendor/github.com/labstack/echo/v4/binder.go @@ -69,9 +69,9 @@ import ( type BindingError struct { // Field is the field name where value binding failed Field string `json:"field"` + *HTTPError // Values of parameter that failed to bind. 
Values []string `json:"-"` - *HTTPError } // NewBindingError creates new instance of binding error @@ -94,16 +94,15 @@ func (be *BindingError) Error() string { // ValueBinder provides utility methods for binding query or path parameter to various Go built-in types type ValueBinder struct { - // failFast is flag for binding methods to return without attempting to bind when previous binding already failed - failFast bool - errors []error - // ValueFunc is used to get single parameter (first) value from request ValueFunc func(sourceParam string) string // ValuesFunc is used to get all values for parameter from request. i.e. `/api/search?ids=1&ids=2` ValuesFunc func(sourceParam string) []string // ErrorFunc is used to create errors. Allows you to use your own error type, that for example marshals to your specific json response ErrorFunc func(sourceParam string, values []string, message interface{}, internalError error) error + errors []error + // failFast is flag for binding methods to return without attempting to bind when previous binding already failed + failFast bool } // QueryParamsBinder creates query parameter value binder diff --git a/vendor/github.com/labstack/echo/v4/binder_generic.go b/vendor/github.com/labstack/echo/v4/binder_generic.go new file mode 100644 index 00000000..f4d45af7 --- /dev/null +++ b/vendor/github.com/labstack/echo/v4/binder_generic.go @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors + +package echo + +import ( + "encoding" + "encoding/json" + "fmt" + "strconv" + "time" +) + +// TimeLayout specifies the format for parsing time values in request parameters. +// It can be a standard Go time layout string or one of the special Unix time layouts. +type TimeLayout string + +// TimeOpts is options for parsing time.Time values +type TimeOpts struct { + // Layout specifies the format for parsing time values in request parameters. 
+ // It can be a standard Go time layout string or one of the special Unix time layouts. + // + // Parsing layout defaults to: echo.TimeLayout(time.RFC3339Nano) + // - To convert to custom layout use `echo.TimeLayout("2006-01-02")` + // - To convert unix timestamp (integer) to time.Time use `echo.TimeLayoutUnixTime` + // - To convert unix timestamp in milliseconds to time.Time use `echo.TimeLayoutUnixTimeMilli` + // - To convert unix timestamp in nanoseconds to time.Time use `echo.TimeLayoutUnixTimeNano` + Layout TimeLayout + + // ParseInLocation is location used with time.ParseInLocation for layout that do not contain + // timezone information to set output time in given location. + // Defaults to time.UTC + ParseInLocation *time.Location + + // ToInLocation is location to which parsed time is converted to after parsing. + // The parsed time will be converted using time.In(ToInLocation). + // Defaults to time.UTC + ToInLocation *time.Location +} + +// TimeLayout constants for parsing Unix timestamps in different precisions. +const ( + TimeLayoutUnixTime = TimeLayout("UnixTime") // Unix timestamp in seconds + TimeLayoutUnixTimeMilli = TimeLayout("UnixTimeMilli") // Unix timestamp in milliseconds + TimeLayoutUnixTimeNano = TimeLayout("UnixTimeNano") // Unix timestamp in nanoseconds +) + +// PathParam extracts and parses a path parameter from the context by name. +// It returns the typed value and an error if binding fails. Returns ErrNonExistentKey if parameter not found. +// +// Empty String Handling: +// +// If the parameter exists but has an empty value, the zero value of type T is returned +// with no error. For example, a path parameter with value "" returns (0, nil) for int types. +// This differs from standard library behavior where parsing empty strings returns errors. +// To treat empty values as errors, validate the result separately or check the raw value. 
+// +// See ParseValue for supported types and options +func PathParam[T any](c Context, paramName string, opts ...any) (T, error) { + for i, name := range c.ParamNames() { + if name == paramName { + pValues := c.ParamValues() + v, err := ParseValue[T](pValues[i], opts...) + if err != nil { + return v, NewBindingError(paramName, []string{pValues[i]}, "path param", err) + } + return v, nil + } + } + var zero T + return zero, ErrNonExistentKey +} + +// PathParamOr extracts and parses a path parameter from the context by name. +// Returns defaultValue if the parameter is not found or has an empty value. +// Returns an error only if parsing fails (e.g., "abc" for int type). +// +// Example: +// +// id, err := echo.PathParamOr[int](c, "id", 0) +// // If "id" is missing: returns (0, nil) +// // If "id" is "123": returns (123, nil) +// // If "id" is "abc": returns (0, BindingError) +// +// See ParseValue for supported types and options +func PathParamOr[T any](c Context, paramName string, defaultValue T, opts ...any) (T, error) { + for i, name := range c.ParamNames() { + if name == paramName { + pValues := c.ParamValues() + v, err := ParseValueOr[T](pValues[i], defaultValue, opts...) + if err != nil { + return v, NewBindingError(paramName, []string{pValues[i]}, "path param", err) + } + return v, nil + } + } + return defaultValue, nil +} + +// QueryParam extracts and parses a single query parameter from the request by key. +// It returns the typed value and an error if binding fails. Returns ErrNonExistentKey if parameter not found. +// +// Empty String Handling: +// +// If the parameter exists but has an empty value (?key=), the zero value of type T is returned +// with no error. For example, "?count=" returns (0, nil) for int types. +// This differs from standard library behavior where parsing empty strings returns errors. +// To treat empty values as errors, validate the result separately or check the raw value. 
+// +// Behavior Summary: +// - Missing key (?other=value): returns (zero, ErrNonExistentKey) +// - Empty value (?key=): returns (zero, nil) +// - Invalid value (?key=abc for int): returns (zero, BindingError) +// +// See ParseValue for supported types and options +func QueryParam[T any](c Context, key string, opts ...any) (T, error) { + values, ok := c.QueryParams()[key] + if !ok { + var zero T + return zero, ErrNonExistentKey + } + if len(values) == 0 { + var zero T + return zero, nil + } + value := values[0] + v, err := ParseValue[T](value, opts...) + if err != nil { + return v, NewBindingError(key, []string{value}, "query param", err) + } + return v, nil +} + +// QueryParamOr extracts and parses a single query parameter from the request by key. +// Returns defaultValue if the parameter is not found or has an empty value. +// Returns an error only if parsing fails (e.g., "abc" for int type). +// +// Example: +// +// page, err := echo.QueryParamOr[int](c, "page", 1) +// // If "page" is missing: returns (1, nil) +// // If "page" is "5": returns (5, nil) +// // If "page" is "abc": returns (1, BindingError) +// +// See ParseValue for supported types and options +func QueryParamOr[T any](c Context, key string, defaultValue T, opts ...any) (T, error) { + values, ok := c.QueryParams()[key] + if !ok { + return defaultValue, nil + } + if len(values) == 0 { + return defaultValue, nil + } + value := values[0] + v, err := ParseValueOr[T](value, defaultValue, opts...) + if err != nil { + return v, NewBindingError(key, []string{value}, "query param", err) + } + return v, nil +} + +// QueryParams extracts and parses all values for a query parameter key as a slice. +// It returns the typed slice and an error if binding any value fails. Returns ErrNonExistentKey if parameter not found. 
+// +// See ParseValues for supported types and options +func QueryParams[T any](c Context, key string, opts ...any) ([]T, error) { + values, ok := c.QueryParams()[key] + if !ok { + return nil, ErrNonExistentKey + } + + result, err := ParseValues[T](values, opts...) + if err != nil { + return nil, NewBindingError(key, values, "query params", err) + } + return result, nil +} + +// QueryParamsOr extracts and parses all values for a query parameter key as a slice. +// Returns defaultValue if the parameter is not found. +// Returns an error only if parsing any value fails. +// +// Example: +// +// ids, err := echo.QueryParamsOr[int](c, "ids", []int{}) +// // If "ids" is missing: returns ([], nil) +// // If "ids" is "1&ids=2": returns ([1, 2], nil) +// // If "ids" contains "abc": returns ([], BindingError) +// +// See ParseValues for supported types and options +func QueryParamsOr[T any](c Context, key string, defaultValue []T, opts ...any) ([]T, error) { + values, ok := c.QueryParams()[key] + if !ok { + return defaultValue, nil + } + + result, err := ParseValuesOr[T](values, defaultValue, opts...) + if err != nil { + return nil, NewBindingError(key, values, "query params", err) + } + return result, nil +} + +// FormParam extracts and parses a single form value from the request by key. +// It returns the typed value and an error if binding fails. Returns ErrNonExistentKey if parameter not found. +// +// Empty String Handling: +// +// If the form field exists but has an empty value, the zero value of type T is returned +// with no error. For example, an empty form field returns (0, nil) for int types. +// This differs from standard library behavior where parsing empty strings returns errors. +// To treat empty values as errors, validate the result separately or check the raw value. 
+// +// See ParseValue for supported types and options +func FormParam[T any](c Context, key string, opts ...any) (T, error) { + formValues, err := c.FormParams() + if err != nil { + var zero T + return zero, fmt.Errorf("failed to parse form param, key: %s, err: %w", key, err) + } + values, ok := formValues[key] + if !ok { + var zero T + return zero, ErrNonExistentKey + } + if len(values) == 0 { + var zero T + return zero, nil + } + value := values[0] + v, err := ParseValue[T](value, opts...) + if err != nil { + return v, NewBindingError(key, []string{value}, "form param", err) + } + return v, nil +} + +// FormParamOr extracts and parses a single form value from the request by key. +// Returns defaultValue if the parameter is not found or has an empty value. +// Returns an error only if parsing fails or form parsing errors occur. +// +// Example: +// +// limit, err := echo.FormValueOr[int](c, "limit", 100) +// // If "limit" is missing: returns (100, nil) +// // If "limit" is "50": returns (50, nil) +// // If "limit" is "abc": returns (100, BindingError) +// +// See ParseValue for supported types and options +func FormParamOr[T any](c Context, key string, defaultValue T, opts ...any) (T, error) { + formValues, err := c.FormParams() + if err != nil { + var zero T + return zero, fmt.Errorf("failed to parse form param, key: %s, err: %w", key, err) + } + values, ok := formValues[key] + if !ok { + return defaultValue, nil + } + if len(values) == 0 { + return defaultValue, nil + } + value := values[0] + v, err := ParseValueOr[T](value, defaultValue, opts...) + if err != nil { + return v, NewBindingError(key, []string{value}, "form param", err) + } + return v, nil +} + +// FormParams extracts and parses all values for a form values key as a slice. +// It returns the typed slice and an error if binding any value fails. Returns ErrNonExistentKey if parameter not found. 
+// +// See ParseValues for supported types and options +func FormParams[T any](c Context, key string, opts ...any) ([]T, error) { + formValues, err := c.FormParams() + if err != nil { + return nil, fmt.Errorf("failed to parse form params, key: %s, err: %w", key, err) + } + values, ok := formValues[key] + if !ok { + return nil, ErrNonExistentKey + } + result, err := ParseValues[T](values, opts...) + if err != nil { + return nil, NewBindingError(key, values, "form params", err) + } + return result, nil +} + +// FormParamsOr extracts and parses all values for a form values key as a slice. +// Returns defaultValue if the parameter is not found. +// Returns an error only if parsing any value fails or form parsing errors occur. +// +// Example: +// +// tags, err := echo.FormParamsOr[string](c, "tags", []string{}) +// // If "tags" is missing: returns ([], nil) +// // If form parsing fails: returns (nil, error) +// +// See ParseValues for supported types and options +func FormParamsOr[T any](c Context, key string, defaultValue []T, opts ...any) ([]T, error) { + formValues, err := c.FormParams() + if err != nil { + return nil, fmt.Errorf("failed to parse form params, key: %s, err: %w", key, err) + } + values, ok := formValues[key] + if !ok { + return defaultValue, nil + } + result, err := ParseValuesOr[T](values, defaultValue, opts...) + if err != nil { + return nil, NewBindingError(key, values, "form params", err) + } + return result, nil +} + +// ParseValues parses value to generic type slice. Same types are supported as ParseValue +// function but the result type is slice instead of scalar value. +// +// See ParseValue for supported types and options +func ParseValues[T any](values []string, opts ...any) ([]T, error) { + var zero []T + return ParseValuesOr(values, zero, opts...) +} + +// ParseValuesOr parses value to generic type slice, when value is empty defaultValue is returned. 
+// Same types are supported as ParseValue function but the result type is slice instead of scalar value. +// +// See ParseValue for supported types and options +func ParseValuesOr[T any](values []string, defaultValue []T, opts ...any) ([]T, error) { + if len(values) == 0 { + return defaultValue, nil + } + result := make([]T, 0, len(values)) + for _, v := range values { + tmp, err := ParseValue[T](v, opts...) + if err != nil { + return nil, err + } + result = append(result, tmp) + } + return result, nil +} + +// ParseValue parses value to generic type +// +// Types that are supported: +// - bool +// - float32 +// - float64 +// - int +// - int8 +// - int16 +// - int32 +// - int64 +// - uint +// - uint8/byte +// - uint16 +// - uint32 +// - uint64 +// - string +// - echo.BindUnmarshaler interface +// - encoding.TextUnmarshaler interface +// - json.Unmarshaler interface +// - time.Duration +// - time.Time use echo.TimeOpts or echo.TimeLayout to set time parsing configuration +func ParseValue[T any](value string, opts ...any) (T, error) { + var zero T + return ParseValueOr(value, zero, opts...) +} + +// ParseValueOr parses value to generic type, when value is empty defaultValue is returned. 
+// +// Types that are supported: +// - bool +// - float32 +// - float64 +// - int +// - int8 +// - int16 +// - int32 +// - int64 +// - uint +// - uint8/byte +// - uint16 +// - uint32 +// - uint64 +// - string +// - echo.BindUnmarshaler interface +// - encoding.TextUnmarshaler interface +// - json.Unmarshaler interface +// - time.Duration +// - time.Time use echo.TimeOpts or echo.TimeLayout to set time parsing configuration +func ParseValueOr[T any](value string, defaultValue T, opts ...any) (T, error) { + if len(value) == 0 { + return defaultValue, nil + } + var tmp T + if err := bindValue(value, &tmp, opts...); err != nil { + var zero T + return zero, fmt.Errorf("failed to parse value, err: %w", err) + } + return tmp, nil +} + +func bindValue(value string, dest any, opts ...any) error { + // NOTE: if this function is ever made public the dest should be checked for nil + // values when dealing with interfaces + if len(opts) > 0 { + if _, isTime := dest.(*time.Time); !isTime { + return fmt.Errorf("options are only supported for time.Time, got %T", dest) + } + } + + switch d := dest.(type) { + case *bool: + n, err := strconv.ParseBool(value) + if err != nil { + return err + } + *d = n + case *float32: + n, err := strconv.ParseFloat(value, 32) + if err != nil { + return err + } + *d = float32(n) + case *float64: + n, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + *d = n + case *int: + n, err := strconv.ParseInt(value, 10, 0) + if err != nil { + return err + } + *d = int(n) + case *int8: + n, err := strconv.ParseInt(value, 10, 8) + if err != nil { + return err + } + *d = int8(n) + case *int16: + n, err := strconv.ParseInt(value, 10, 16) + if err != nil { + return err + } + *d = int16(n) + case *int32: + n, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + *d = int32(n) + case *int64: + n, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *d = n + case *uint: + n, err := 
strconv.ParseUint(value, 10, 0) + if err != nil { + return err + } + *d = uint(n) + case *uint8: + n, err := strconv.ParseUint(value, 10, 8) + if err != nil { + return err + } + *d = uint8(n) + case *uint16: + n, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + *d = uint16(n) + case *uint32: + n, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return err + } + *d = uint32(n) + case *uint64: + n, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + *d = n + case *string: + *d = value + case *time.Duration: + t, err := time.ParseDuration(value) + if err != nil { + return err + } + *d = t + case *time.Time: + to := TimeOpts{ + Layout: TimeLayout(time.RFC3339Nano), + ParseInLocation: time.UTC, + ToInLocation: time.UTC, + } + for _, o := range opts { + switch v := o.(type) { + case TimeOpts: + if v.Layout != "" { + to.Layout = v.Layout + } + if v.ParseInLocation != nil { + to.ParseInLocation = v.ParseInLocation + } + if v.ToInLocation != nil { + to.ToInLocation = v.ToInLocation + } + case TimeLayout: + to.Layout = v + } + } + var t time.Time + var err error + switch to.Layout { + case TimeLayoutUnixTime: + n, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + t = time.Unix(n, 0) + case TimeLayoutUnixTimeMilli: + n, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + t = time.UnixMilli(n) + case TimeLayoutUnixTimeNano: + n, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + t = time.Unix(0, n) + default: + if to.ParseInLocation != nil { + t, err = time.ParseInLocation(string(to.Layout), value, to.ParseInLocation) + } else { + t, err = time.Parse(string(to.Layout), value) + } + if err != nil { + return err + } + } + *d = t.In(to.ToInLocation) + case BindUnmarshaler: + if err := d.UnmarshalParam(value); err != nil { + return err + } + case encoding.TextUnmarshaler: + if err := d.UnmarshalText([]byte(value)); err != nil { + return err + } 
+ case json.Unmarshaler: + if err := d.UnmarshalJSON([]byte(value)); err != nil { + return err + } + default: + return fmt.Errorf("unsupported value type: %T", dest) + } + return nil +} diff --git a/vendor/github.com/labstack/echo/v4/context.go b/vendor/github.com/labstack/echo/v4/context.go index 4edaa2ee..67e83181 100644 --- a/vendor/github.com/labstack/echo/v4/context.go +++ b/vendor/github.com/labstack/echo/v4/context.go @@ -97,22 +97,22 @@ type Context interface { Cookies() []*http.Cookie // Get retrieves data from the context. - Get(key string) interface{} + Get(key string) any // Set saves data in the context. - Set(key string, val interface{}) + Set(key string, val any) // Bind binds path params, query params and the request body into provided type `i`. The default binder // binds body based on Content-Type header. - Bind(i interface{}) error + Bind(i any) error // Validate validates provided `i`. It is usually called after `Context#Bind()`. // Validator must be registered using `Echo#Validator`. - Validate(i interface{}) error + Validate(i any) error // Render renders a template with data and sends a text/html response with status // code. Renderer must be registered using `Echo.Renderer`. - Render(code int, name string, data interface{}) error + Render(code int, name string, data any) error // HTML sends an HTTP response with status code. HTML(code int, html string) error @@ -124,27 +124,27 @@ type Context interface { String(code int, s string) error // JSON sends a JSON response with status code. - JSON(code int, i interface{}) error + JSON(code int, i any) error // JSONPretty sends a pretty-print JSON with status code. - JSONPretty(code int, i interface{}, indent string) error + JSONPretty(code int, i any, indent string) error // JSONBlob sends a JSON blob response with status code. JSONBlob(code int, b []byte) error // JSONP sends a JSONP response with status code. It uses `callback` to construct // the JSONP payload. 
- JSONP(code int, callback string, i interface{}) error + JSONP(code int, callback string, i any) error // JSONPBlob sends a JSONP blob response with status code. It uses `callback` // to construct the JSONP payload. JSONPBlob(code int, callback string, b []byte) error // XML sends an XML response with status code. - XML(code int, i interface{}) error + XML(code int, i any) error // XMLPretty sends a pretty-print XML with status code. - XMLPretty(code int, i interface{}, indent string) error + XMLPretty(code int, i any, indent string) error // XMLBlob sends an XML blob response with status code. XMLBlob(code int, b []byte) error @@ -200,31 +200,31 @@ type Context interface { } type context struct { + logger Logger request *http.Request response *Response query url.Values echo *Echo - logger Logger store Map lock sync.RWMutex // following fields are set by Router + handler HandlerFunc // path is route path that Router matched. It is empty string where there is no route match. // Route registered with RouteNotFound is considered as a match and path therefore is not empty. path string - // pnames length is tied to param count for the matched route - pnames []string - // Usually echo.Echo is sizing pvalues but there could be user created middlewares that decide to // overwrite parameter by calling SetParamNames + SetParamValues. // When echo.Echo allocated that slice it length/capacity is tied to echo.Echo.maxParam value. // // It is important that pvalues size is always equal or bigger to pnames length. 
pvalues []string - handler HandlerFunc + + // pnames length is tied to param count for the matched route + pnames []string } const ( @@ -359,7 +359,7 @@ func (c *context) ParamValues() []string { func (c *context) SetParamValues(values ...string) { // NOTE: Don't just set c.pvalues = values, because it has to have length c.echo.maxParam (or bigger) at all times - // It will brake the Router#Find code + // It will break the Router#Find code limit := len(values) if limit > len(c.pvalues) { c.pvalues = make([]string, limit) @@ -430,13 +430,13 @@ func (c *context) Cookies() []*http.Cookie { return c.request.Cookies() } -func (c *context) Get(key string) interface{} { +func (c *context) Get(key string) any { c.lock.RLock() defer c.lock.RUnlock() return c.store[key] } -func (c *context) Set(key string, val interface{}) { +func (c *context) Set(key string, val any) { c.lock.Lock() defer c.lock.Unlock() @@ -446,18 +446,18 @@ func (c *context) Set(key string, val interface{}) { c.store[key] = val } -func (c *context) Bind(i interface{}) error { +func (c *context) Bind(i any) error { return c.echo.Binder.Bind(i, c) } -func (c *context) Validate(i interface{}) error { +func (c *context) Validate(i any) error { if c.echo.Validator == nil { return ErrValidatorNotRegistered } return c.echo.Validator.Validate(i) } -func (c *context) Render(code int, name string, data interface{}) (err error) { +func (c *context) Render(code int, name string, data any) (err error) { if c.echo.Renderer == nil { return ErrRendererNotRegistered } @@ -480,7 +480,7 @@ func (c *context) String(code int, s string) (err error) { return c.Blob(code, MIMETextPlainCharsetUTF8, []byte(s)) } -func (c *context) jsonPBlob(code int, callback string, i interface{}) (err error) { +func (c *context) jsonPBlob(code int, callback string, i any) (err error) { indent := "" if _, pretty := c.QueryParams()["pretty"]; c.echo.Debug || pretty { indent = defaultIndent @@ -499,13 +499,13 @@ func (c *context) jsonPBlob(code 
int, callback string, i interface{}) (err error return } -func (c *context) json(code int, i interface{}, indent string) error { +func (c *context) json(code int, i any, indent string) error { c.writeContentType(MIMEApplicationJSON) c.response.Status = code return c.echo.JSONSerializer.Serialize(c, i, indent) } -func (c *context) JSON(code int, i interface{}) (err error) { +func (c *context) JSON(code int, i any) (err error) { indent := "" if _, pretty := c.QueryParams()["pretty"]; c.echo.Debug || pretty { indent = defaultIndent @@ -513,7 +513,7 @@ func (c *context) JSON(code int, i interface{}) (err error) { return c.json(code, i, indent) } -func (c *context) JSONPretty(code int, i interface{}, indent string) (err error) { +func (c *context) JSONPretty(code int, i any, indent string) (err error) { return c.json(code, i, indent) } @@ -521,7 +521,7 @@ func (c *context) JSONBlob(code int, b []byte) (err error) { return c.Blob(code, MIMEApplicationJSON, b) } -func (c *context) JSONP(code int, callback string, i interface{}) (err error) { +func (c *context) JSONP(code int, callback string, i any) (err error) { return c.jsonPBlob(code, callback, i) } @@ -538,7 +538,7 @@ func (c *context) JSONPBlob(code int, callback string, b []byte) (err error) { return } -func (c *context) xml(code int, i interface{}, indent string) (err error) { +func (c *context) xml(code int, i any, indent string) (err error) { c.writeContentType(MIMEApplicationXMLCharsetUTF8) c.response.WriteHeader(code) enc := xml.NewEncoder(c.response) @@ -551,7 +551,7 @@ func (c *context) xml(code int, i interface{}, indent string) (err error) { return enc.Encode(i) } -func (c *context) XML(code int, i interface{}) (err error) { +func (c *context) XML(code int, i any) (err error) { indent := "" if _, pretty := c.QueryParams()["pretty"]; c.echo.Debug || pretty { indent = defaultIndent @@ -559,7 +559,7 @@ func (c *context) XML(code int, i interface{}) (err error) { return c.xml(code, i, indent) } -func (c 
*context) XMLPretty(code int, i interface{}, indent string) (err error) { +func (c *context) XMLPretty(code int, i any, indent string) (err error) { return c.xml(code, i, indent) } diff --git a/vendor/github.com/labstack/echo/v4/context_generic.go b/vendor/github.com/labstack/echo/v4/context_generic.go new file mode 100644 index 00000000..f06041bb --- /dev/null +++ b/vendor/github.com/labstack/echo/v4/context_generic.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors + +package echo + +import "errors" + +// ErrNonExistentKey is error that is returned when key does not exist +var ErrNonExistentKey = errors.New("non existent key") + +// ErrInvalidKeyType is error that is returned when the value is not castable to expected type. +var ErrInvalidKeyType = errors.New("invalid key type") + +// ContextGet retrieves a value from the context store or ErrNonExistentKey error the key is missing. +// Returns ErrInvalidKeyType error if the value is not castable to type T. +func ContextGet[T any](c Context, key string) (T, error) { + val := c.Get(key) + if val == any(nil) { + var zero T + return zero, ErrNonExistentKey + } + + typed, ok := val.(T) + if !ok { + var zero T + return zero, ErrInvalidKeyType + } + + return typed, nil +} + +// ContextGetOr retrieves a value from the context store or returns a default value when the key +// is missing. Returns ErrInvalidKeyType error if the value is not castable to type T. 
+func ContextGetOr[T any](c Context, key string, defaultValue T) (T, error) { + typed, err := ContextGet[T](c, key) + if err == ErrNonExistentKey { + return defaultValue, nil + } + return typed, err +} diff --git a/vendor/github.com/labstack/echo/v4/echo.go b/vendor/github.com/labstack/echo/v4/echo.go index ab66b0da..ae2283f6 100644 --- a/vendor/github.com/labstack/echo/v4/echo.go +++ b/vendor/github.com/labstack/echo/v4/echo.go @@ -45,7 +45,6 @@ import ( "encoding/json" "errors" "fmt" - "io" stdLog "log" "net" "net/http" @@ -91,10 +90,6 @@ type Echo struct { Listener net.Listener TLSListener net.Listener AutoTLSManager autocert.Manager - DisableHTTP2 bool - Debug bool - HideBanner bool - HidePort bool HTTPErrorHandler HTTPErrorHandler Binder Binder JSONSerializer JSONSerializer @@ -106,6 +101,10 @@ type Echo struct { // OnAddRouteHandler is called when Echo adds new route to specific host router. OnAddRouteHandler func(host string, route Route, handler HandlerFunc, middleware []MiddlewareFunc) + DisableHTTP2 bool + Debug bool + HideBanner bool + HidePort bool } // Route contains a handler and information for matching against requests. @@ -117,9 +116,9 @@ type Route struct { // HTTPError represents an error that occurred while handling a request. type HTTPError struct { - Code int `json:"-"` - Message interface{} `json:"message"` Internal error `json:"-"` // Stores the error returned by an external dependency + Message interface{} `json:"message"` + Code int `json:"-"` } // MiddlewareFunc defines a function to process middleware. @@ -142,11 +141,6 @@ type JSONSerializer interface { Deserialize(c Context, i interface{}) error } -// Renderer is the interface that wraps the Render function. -type Renderer interface { - Render(io.Writer, string, interface{}, Context) error -} - // Map defines a generic map of type `map[string]interface{}`. 
type Map map[string]interface{} @@ -238,9 +232,12 @@ const ( HeaderXCorrelationID = "X-Correlation-Id" HeaderXRequestedWith = "X-Requested-With" HeaderServer = "Server" - HeaderOrigin = "Origin" - HeaderCacheControl = "Cache-Control" - HeaderConnection = "Connection" + + // HeaderOrigin request header indicates the origin (scheme, hostname, and port) that caused the request. + // See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin + HeaderOrigin = "Origin" + HeaderCacheControl = "Cache-Control" + HeaderConnection = "Connection" // Access control HeaderAccessControlRequestMethod = "Access-Control-Request-Method" @@ -261,11 +258,16 @@ const ( HeaderContentSecurityPolicyReportOnly = "Content-Security-Policy-Report-Only" HeaderXCSRFToken = "X-CSRF-Token" HeaderReferrerPolicy = "Referrer-Policy" + + // HeaderSecFetchSite fetch metadata request header indicates the relationship between a request initiator's + // origin and the origin of the requested resource. + // See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Sec-Fetch-Site + HeaderSecFetchSite = "Sec-Fetch-Site" ) const ( // Version of Echo - Version = "4.12.0" + Version = "4.15.0" website = "https://echo.labstack.com" // http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo banner = ` diff --git a/vendor/github.com/labstack/echo/v4/echo_fs.go b/vendor/github.com/labstack/echo/v4/echo_fs.go index a7b231f3..0ffc4b0b 100644 --- a/vendor/github.com/labstack/echo/v4/echo_fs.go +++ b/vendor/github.com/labstack/echo/v4/echo_fs.go @@ -102,8 +102,8 @@ func StaticFileHandler(file string, filesystem fs.FS) HandlerFunc { // traverse up from current executable run path. 
// NB: private because you really should use fs.FS implementation instances type defaultFS struct { - prefix string fs fs.FS + prefix string } func newDefaultFS() *defaultFS { diff --git a/vendor/github.com/labstack/echo/v4/group.go b/vendor/github.com/labstack/echo/v4/group.go index eca25c94..cb37b123 100644 --- a/vendor/github.com/labstack/echo/v4/group.go +++ b/vendor/github.com/labstack/echo/v4/group.go @@ -14,8 +14,8 @@ type Group struct { common host string prefix string - middleware []MiddlewareFunc echo *Echo + middleware []MiddlewareFunc } // Use implements `Echo#Use()` for sub-routes within the Group. diff --git a/vendor/github.com/labstack/echo/v4/ip.go b/vendor/github.com/labstack/echo/v4/ip.go index 6aed8d60..dce51f55 100644 --- a/vendor/github.com/labstack/echo/v4/ip.go +++ b/vendor/github.com/labstack/echo/v4/ip.go @@ -24,7 +24,7 @@ To retrieve IP address reliably/securely, you must let your application be aware In Echo, this can be done by configuring `Echo#IPExtractor` appropriately. This guides show you why and how. -> Note: if you dont' set `Echo#IPExtractor` explicitly, Echo fallback to legacy behavior, which is not a good choice. +> Note: if you don't set `Echo#IPExtractor` explicitly, Echo fallback to legacy behavior, which is not a good choice. 
Let's start from two questions to know the right direction: @@ -134,10 +134,10 @@ Private IPv6 address ranges: */ type ipChecker struct { + trustExtraRanges []*net.IPNet trustLoopback bool trustLinkLocal bool trustPrivateNet bool - trustExtraRanges []*net.IPNet } // TrustOption is config for which IP address to trust @@ -179,16 +179,6 @@ func newIPChecker(configs []TrustOption) *ipChecker { return checker } -// Go1.16+ added `ip.IsPrivate()` but until that use this implementation -func isPrivateIPRange(ip net.IP) bool { - if ip4 := ip.To4(); ip4 != nil { - return ip4[0] == 10 || - ip4[0] == 172 && ip4[1]&0xf0 == 16 || - ip4[0] == 192 && ip4[1] == 168 - } - return len(ip) == net.IPv6len && ip[0]&0xfe == 0xfc -} - func (c *ipChecker) trust(ip net.IP) bool { if c.trustLoopback && ip.IsLoopback() { return true @@ -196,7 +186,7 @@ func (c *ipChecker) trust(ip net.IP) bool { if c.trustLinkLocal && ip.IsLinkLocalUnicast() { return true } - if c.trustPrivateNet && isPrivateIPRange(ip) { + if c.trustPrivateNet && ip.IsPrivate() { return true } for _, trustedRange := range c.trustExtraRanges { @@ -219,8 +209,14 @@ func ExtractIPDirect() IPExtractor { } func extractIP(req *http.Request) string { - ra, _, _ := net.SplitHostPort(req.RemoteAddr) - return ra + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + if net.ParseIP(req.RemoteAddr) != nil { + return req.RemoteAddr + } + return "" + } + return host } // ExtractIPFromRealIPHeader extracts IP address using x-real-ip header. 
diff --git a/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go b/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go index 9285f29f..4a46098e 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go +++ b/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go @@ -66,6 +66,9 @@ func BasicAuthWithConfig(config BasicAuthConfig) echo.MiddlewareFunc { config.Realm = defaultRealm } + // Pre-compute the quoted realm for WWW-Authenticate header (RFC 7617) + quotedRealm := strconv.Quote(config.Realm) + return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { if config.Skipper(c) { @@ -84,27 +87,21 @@ func BasicAuthWithConfig(config BasicAuthConfig) echo.MiddlewareFunc { } cred := string(b) - for i := 0; i < len(cred); i++ { - if cred[i] == ':' { - // Verify credentials - valid, err := config.Validator(cred[:i], cred[i+1:], c) - if err != nil { - return err - } else if valid { - return next(c) - } - break + user, pass, ok := strings.Cut(cred, ":") + if ok { + // Verify credentials + valid, err := config.Validator(user, pass, c) + if err != nil { + return err + } else if valid { + return next(c) } } } - realm := defaultRealm - if config.Realm != defaultRealm { - realm = strconv.Quote(config.Realm) - } - // Need to return `401` for browsers to pop-up login box. - c.Response().Header().Set(echo.HeaderWWWAuthenticate, basic+" realm="+realm) + // Realm is case-insensitive, so we can use "basic" directly. See RFC 7617. 
+ c.Response().Header().Set(echo.HeaderWWWAuthenticate, basic+" realm="+quotedRealm) return echo.ErrUnauthorized } } diff --git a/vendor/github.com/labstack/echo/v4/middleware/body_dump.go b/vendor/github.com/labstack/echo/v4/middleware/body_dump.go index b06f7620..add778d6 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/body_dump.go +++ b/vendor/github.com/labstack/echo/v4/middleware/body_dump.go @@ -66,8 +66,12 @@ func BodyDumpWithConfig(config BodyDumpConfig) echo.MiddlewareFunc { // Request reqBody := []byte{} - if c.Request().Body != nil { // Read - reqBody, _ = io.ReadAll(c.Request().Body) + if c.Request().Body != nil { + var readErr error + reqBody, readErr = io.ReadAll(c.Request().Body) + if readErr != nil { + return readErr + } } c.Request().Body = io.NopCloser(bytes.NewBuffer(reqBody)) // Reset @@ -98,14 +102,14 @@ func (w *bodyDumpResponseWriter) Write(b []byte) (int, error) { } func (w *bodyDumpResponseWriter) Flush() { - err := responseControllerFlush(w.ResponseWriter) + err := http.NewResponseController(w.ResponseWriter).Flush() if err != nil && errors.Is(err, http.ErrNotSupported) { panic(errors.New("response writer flushing is not supported")) } } func (w *bodyDumpResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return responseControllerHijack(w.ResponseWriter) + return http.NewResponseController(w.ResponseWriter).Hijack() } func (w *bodyDumpResponseWriter) Unwrap() http.ResponseWriter { diff --git a/vendor/github.com/labstack/echo/v4/middleware/body_limit.go b/vendor/github.com/labstack/echo/v4/middleware/body_limit.go index 7d3c665f..d13ad2c4 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/body_limit.go +++ b/vendor/github.com/labstack/echo/v4/middleware/body_limit.go @@ -6,6 +6,7 @@ package middleware import ( "fmt" "io" + "net/http" "sync" "github.com/labstack/echo/v4" @@ -77,7 +78,10 @@ func BodyLimitWithConfig(config BodyLimitConfig) echo.MiddlewareFunc { } // Based on content read - r := 
pool.Get().(*limitedReader) + r, ok := pool.Get().(*limitedReader) + if !ok { + return echo.NewHTTPError(http.StatusInternalServerError, "invalid pool object") + } r.Reset(req.Body) defer pool.Put(r) req.Body = r diff --git a/vendor/github.com/labstack/echo/v4/middleware/compress.go b/vendor/github.com/labstack/echo/v4/middleware/compress.go index 557bdc8e..48ccc985 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/compress.go +++ b/vendor/github.com/labstack/echo/v4/middleware/compress.go @@ -96,7 +96,7 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc { i := pool.Get() w, ok := i.(*gzip.Writer) if !ok { - return echo.NewHTTPError(http.StatusInternalServerError, i.(error).Error()) + return echo.NewHTTPError(http.StatusInternalServerError, "invalid pool object") } rw := res.Writer w.Reset(rw) @@ -189,8 +189,10 @@ func (w *gzipResponseWriter) Flush() { w.Writer.Write(w.buffer.Bytes()) } - w.Writer.(*gzip.Writer).Flush() - _ = responseControllerFlush(w.ResponseWriter) + if gw, ok := w.Writer.(*gzip.Writer); ok { + gw.Flush() + } + _ = http.NewResponseController(w.ResponseWriter).Flush() } func (w *gzipResponseWriter) Unwrap() http.ResponseWriter { @@ -198,7 +200,7 @@ func (w *gzipResponseWriter) Unwrap() http.ResponseWriter { } func (w *gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return responseControllerHijack(w.ResponseWriter) + return http.NewResponseController(w.ResponseWriter).Hijack() } func (w *gzipResponseWriter) Push(target string, opts *http.PushOptions) error { diff --git a/vendor/github.com/labstack/echo/v4/middleware/context_timeout.go b/vendor/github.com/labstack/echo/v4/middleware/context_timeout.go index e67173f2..5d9ae975 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/context_timeout.go +++ b/vendor/github.com/labstack/echo/v4/middleware/context_timeout.go @@ -11,12 +11,45 @@ import ( "github.com/labstack/echo/v4" ) +// ContextTimeout Middleware +// +// ContextTimeout provides request timeout 
functionality using Go's context mechanism. +// It is the recommended replacement for the deprecated Timeout middleware. +// +// +// Basic Usage: +// +// e.Use(middleware.ContextTimeout(30 * time.Second)) +// +// With Configuration: +// +// e.Use(middleware.ContextTimeoutWithConfig(middleware.ContextTimeoutConfig{ +// Timeout: 30 * time.Second, +// Skipper: middleware.DefaultSkipper, +// })) +// +// Handler Example: +// +// e.GET("/task", func(c echo.Context) error { +// ctx := c.Request().Context() +// +// result, err := performTaskWithContext(ctx) +// if err != nil { +// if errors.Is(err, context.DeadlineExceeded) { +// return echo.NewHTTPError(http.StatusServiceUnavailable, "timeout") +// } +// return err +// } +// +// return c.JSON(http.StatusOK, result) +// }) + // ContextTimeoutConfig defines the config for ContextTimeout middleware. type ContextTimeoutConfig struct { // Skipper defines a function to skip middleware. Skipper Skipper - // ErrorHandler is a function when error aries in middleware execution. + // ErrorHandler is a function when error arises in middleware execution. 
	ErrorHandler func(err error, c echo.Context) error
 
 	// Timeout configures a timeout for the middleware, defaults to 0 for no timeout
diff --git a/vendor/github.com/labstack/echo/v4/middleware/cors.go b/vendor/github.com/labstack/echo/v4/middleware/cors.go
index 7af6a76f..a1f44532 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/cors.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/cors.go
@@ -147,13 +147,25 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
 		config.AllowMethods = DefaultCORSConfig.AllowMethods
 	}
 
-	allowOriginPatterns := []string{}
+	allowOriginPatterns := make([]*regexp.Regexp, 0, len(config.AllowOrigins))
 	for _, origin := range config.AllowOrigins {
+		if origin == "*" {
+			continue // "*" is handled differently and does not need regexp
+		}
 		pattern := regexp.QuoteMeta(origin)
 		pattern = strings.ReplaceAll(pattern, "\\*", ".*")
 		pattern = strings.ReplaceAll(pattern, "\\?", ".")
 		pattern = "^" + pattern + "$"
-		allowOriginPatterns = append(allowOriginPatterns, pattern)
+
+		re, err := regexp.Compile(pattern)
+		if err != nil {
+			// this is to preserve previous behaviour - invalid patterns were just ignored.
+			// If we turned this into a panic, users with invalid patterns
+			// would have applications crashing in production due to an unrecovered panic.
+			// TODO: this should be turned to error/panic in `v5`
+			continue
+		}
+		allowOriginPatterns = append(allowOriginPatterns, re)
 	}
 
 	allowMethods := strings.Join(config.AllowMethods, ",")
@@ -239,7 +251,7 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
 			}
 			if checkPatterns {
 				for _, re := range allowOriginPatterns {
-					if match, _ := regexp.MatchString(re, origin); match {
+					if match := re.MatchString(origin); match {
 						allowOrigin = origin
 						break
 					}
diff --git a/vendor/github.com/labstack/echo/v4/middleware/csrf.go b/vendor/github.com/labstack/echo/v4/middleware/csrf.go
index 92f4019d..1a35da63 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/csrf.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/csrf.go
@@ -6,16 +6,43 @@ package middleware
 import (
 	"crypto/subtle"
 	"net/http"
+	"slices"
+	"strings"
 	"time"
 
 	"github.com/labstack/echo/v4"
 )
 
+// CSRFUsingSecFetchSite is a context key for CSRF middleware that is set when the client browser is using Sec-Fetch-Site
+// header and the request is deemed safe.
+// It is a dummy token value that can be used to render CSRF token for form by handlers.
+//
+// We know that the client is using a browser that supports Sec-Fetch-Site header, so when the form is submitted in
+// the future with this dummy token value it is OK. Although the request is safe, the template rendered by the
+// handler may need this value to render CSRF token for form.
+const CSRFUsingSecFetchSite = "_echo_csrf_using_sec_fetch_site_"
+
+// CSRFConfig defines the config for CSRF middleware.
 type CSRFConfig struct {
 	// Skipper defines a function to skip middleware.
 	Skipper Skipper
 
+	// TrustedOrigins permits any request with `Sec-Fetch-Site` header whose `Origin` header
+	// exactly matches the specified value.
+	// Values should be formatted as Origin header "scheme://host[:port]".
+	//
+	// See [Origin]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin
+	// See [Sec-Fetch-Site]: https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#fetch-metadata-headers
+	TrustedOrigins []string
+
+	// AllowSecFetchSiteFunc allows custom behaviour for `Sec-Fetch-Site` requests that are about to
+	// fail with CSRF error, to be allowed or replaced with custom error.
+	// This function applies to `Sec-Fetch-Site` values:
+	// - `same-site` same registrable domain (subdomain and/or different port)
+	// - `cross-site` request originates from different site
+	// See [Sec-Fetch-Site]: https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#fetch-metadata-headers
+	AllowSecFetchSiteFunc func(c echo.Context) (bool, error)
+
 	// TokenLength is the length of the generated token.
 	TokenLength uint8 `yaml:"token_length"`
 	// Optional. Default value 32.
@@ -65,6 +92,8 @@ type CSRFConfig struct {
 
 	// ErrorHandler defines a function which is executed for returning custom errors.
 	ErrorHandler CSRFErrorHandler
+
+	generator func(length uint8) string
 }
 
 // CSRFErrorHandler is a function which is executed for creating custom errors.
@@ -94,7 +123,11 @@ func CSRF() echo.MiddlewareFunc {
 
 // CSRFWithConfig returns a CSRF middleware with config.
 // See `CSRF()`.
func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc { - // Defaults + return toMiddlewareOrPanic(config) +} + +// ToMiddleware converts CSRFConfig to middleware or returns an error for invalid configuration +func (config CSRFConfig) ToMiddleware() (echo.MiddlewareFunc, error) { if config.Skipper == nil { config.Skipper = DefaultCSRFConfig.Skipper } @@ -117,10 +150,20 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc { if config.CookieSameSite == http.SameSiteNoneMode { config.CookieSecure = true } + if len(config.TrustedOrigins) > 0 { + if vErr := validateOrigins(config.TrustedOrigins, "trusted origin"); vErr != nil { + return nil, vErr + } + config.TrustedOrigins = append([]string(nil), config.TrustedOrigins...) + } + tokenGenerator := randomString + if config.generator != nil { + tokenGenerator = config.generator + } extractors, cErr := CreateExtractors(config.TokenLookup) if cErr != nil { - panic(cErr) + return nil, cErr } return func(next echo.HandlerFunc) echo.HandlerFunc { @@ -129,9 +172,20 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc { return next(c) } + // use the `Sec-Fetch-Site` header as part of a modern approach to CSRF protection + allow, err := config.checkSecFetchSiteRequest(c) + if err != nil { + return err + } + if allow { + return next(c) + } + + // Fallback to legacy token based CSRF protection + token := "" if k, err := c.Cookie(config.CookieName); err != nil { - token = randomString(config.TokenLength) + token = tokenGenerator(config.TokenLength) } else { token = k.Value // Reuse token } @@ -210,9 +264,60 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc { return next(c) } - } + }, nil } func validateCSRFToken(token, clientToken string) bool { return subtle.ConstantTimeCompare([]byte(token), []byte(clientToken)) == 1 } + +var safeMethods = []string{http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodTrace} + +func (config CSRFConfig) checkSecFetchSiteRequest(c echo.Context) (bool, 
error) { + // https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#fetch-metadata-headers + // Sec-Fetch-Site values are: + // - `same-origin` exact origin match - allow always + // - `same-site` same registrable domain (subdomain and/or different port) - block, unless explicitly trusted + // - `cross-site` request originates from different site - block, unless explicitly trusted + // - `none` direct navigation (URL bar, bookmark) - allow always + secFetchSite := c.Request().Header.Get(echo.HeaderSecFetchSite) + if secFetchSite == "" { + return false, nil + } + + if len(config.TrustedOrigins) > 0 { + // trusted sites ala OAuth callbacks etc. should be let through + origin := c.Request().Header.Get(echo.HeaderOrigin) + if origin != "" { + for _, trustedOrigin := range config.TrustedOrigins { + if strings.EqualFold(origin, trustedOrigin) { + return true, nil + } + } + } + } + isSafe := slices.Contains(safeMethods, c.Request().Method) + if !isSafe { // for state-changing request check SecFetchSite value + isSafe = secFetchSite == "same-origin" || secFetchSite == "none" + } + + if isSafe { + // This helps handlers that support older token-based CSRF protection. + // We know that the client is using a browser that supports Sec-Fetch-Site header, so when the form is submitted in + // the future with this dummy token value it is OK. Although the request is safe, the template rendered by the + // handler may need this value to render CSRF token for form. 
+ c.Set(config.ContextKey, CSRFUsingSecFetchSite) + return true, nil + } + // we are here when request is state-changing and `cross-site` or `same-site` + + // Note: if you want to block `same-site` use config.TrustedOrigins or `config.AllowSecFetchSiteFunc` + if config.AllowSecFetchSiteFunc != nil { + return config.AllowSecFetchSiteFunc(c) + } + + if secFetchSite == "same-site" { + return false, nil // fall back to legacy token + } + return false, echo.NewHTTPError(http.StatusForbidden, "cross-site request blocked by CSRF") +} diff --git a/vendor/github.com/labstack/echo/v4/middleware/jwt.go b/vendor/github.com/labstack/echo/v4/middleware/jwt.go deleted file mode 100644 index a6bf16f9..00000000 --- a/vendor/github.com/labstack/echo/v4/middleware/jwt.go +++ /dev/null @@ -1,303 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build go1.15 -// +build go1.15 - -package middleware - -import ( - "errors" - "fmt" - "github.com/golang-jwt/jwt" - "github.com/labstack/echo/v4" - "net/http" - "reflect" -) - -// JWTConfig defines the config for JWT middleware. -type JWTConfig struct { - // Skipper defines a function to skip middleware. - Skipper Skipper - - // BeforeFunc defines a function which is executed just before the middleware. - BeforeFunc BeforeFunc - - // SuccessHandler defines a function which is executed for a valid token before middleware chain continues with next - // middleware or handler. - SuccessHandler JWTSuccessHandler - - // ErrorHandler defines a function which is executed for an invalid token. - // It may be used to define a custom JWT error. - ErrorHandler JWTErrorHandler - - // ErrorHandlerWithContext is almost identical to ErrorHandler, but it's passed the current context. 
- ErrorHandlerWithContext JWTErrorHandlerWithContext - - // ContinueOnIgnoredError allows the next middleware/handler to be called when ErrorHandlerWithContext decides to - // ignore the error (by returning `nil`). - // This is useful when parts of your site/api allow public access and some authorized routes provide extra functionality. - // In that case you can use ErrorHandlerWithContext to set a default public JWT token value in the request context - // and continue. Some logic down the remaining execution chain needs to check that (public) token value then. - ContinueOnIgnoredError bool - - // Signing key to validate token. - // This is one of the three options to provide a token validation key. - // The order of precedence is a user-defined KeyFunc, SigningKeys and SigningKey. - // Required if neither user-defined KeyFunc nor SigningKeys is provided. - SigningKey interface{} - - // Map of signing keys to validate token with kid field usage. - // This is one of the three options to provide a token validation key. - // The order of precedence is a user-defined KeyFunc, SigningKeys and SigningKey. - // Required if neither user-defined KeyFunc nor SigningKey is provided. - SigningKeys map[string]interface{} - - // Signing method used to check the token's signing algorithm. - // Optional. Default value HS256. - SigningMethod string - - // Context key to store user information from the token into context. - // Optional. Default value "user". - ContextKey string - - // Claims are extendable claims data defining token content. Used by default ParseTokenFunc implementation. - // Not used if custom ParseTokenFunc is set. - // Optional. Default value jwt.MapClaims - Claims jwt.Claims - - // TokenLookup is a string in the form of ":" or ":,:" that is used - // to extract token from the request. - // Optional. Default value "header:Authorization". - // Possible values: - // - "header:" or "header::" - // `` is argument value to cut/trim prefix of the extracted value. 
This is useful if header - // value has static prefix like `Authorization: ` where part that we - // want to cut is ` ` note the space at the end. - // In case of JWT tokens `Authorization: Bearer ` prefix we cut is `Bearer `. - // If prefix is left empty the whole value is returned. - // - "query:" - // - "param:" - // - "cookie:" - // - "form:" - // Multiple sources example: - // - "header:Authorization,cookie:myowncookie" - TokenLookup string - - // TokenLookupFuncs defines a list of user-defined functions that extract JWT token from the given context. - // This is one of the two options to provide a token extractor. - // The order of precedence is user-defined TokenLookupFuncs, and TokenLookup. - // You can also provide both if you want. - TokenLookupFuncs []ValuesExtractor - - // AuthScheme to be used in the Authorization header. - // Optional. Default value "Bearer". - AuthScheme string - - // KeyFunc defines a user-defined function that supplies the public key for a token validation. - // The function shall take care of verifying the signing algorithm and selecting the proper key. - // A user-defined KeyFunc can be useful if tokens are issued by an external party. - // Used by default ParseTokenFunc implementation. - // - // When a user-defined KeyFunc is provided, SigningKey, SigningKeys, and SigningMethod are ignored. - // This is one of the three options to provide a token validation key. - // The order of precedence is a user-defined KeyFunc, SigningKeys and SigningKey. - // Required if neither SigningKeys nor SigningKey is provided. - // Not used if custom ParseTokenFunc is set. - // Default to an internal implementation verifying the signing algorithm and selecting the proper key. - KeyFunc jwt.Keyfunc - - // ParseTokenFunc defines a user-defined function that parses token from given auth. Returns an error when token - // parsing fails or parsed token is invalid. 
- // Defaults to implementation using `github.com/golang-jwt/jwt` as JWT implementation library - ParseTokenFunc func(auth string, c echo.Context) (interface{}, error) -} - -// JWTSuccessHandler defines a function which is executed for a valid token. -type JWTSuccessHandler func(c echo.Context) - -// JWTErrorHandler defines a function which is executed for an invalid token. -type JWTErrorHandler func(err error) error - -// JWTErrorHandlerWithContext is almost identical to JWTErrorHandler, but it's passed the current context. -type JWTErrorHandlerWithContext func(err error, c echo.Context) error - -// Algorithms -const ( - AlgorithmHS256 = "HS256" -) - -// ErrJWTMissing is error that is returned when no JWToken was extracted from the request. -var ErrJWTMissing = echo.NewHTTPError(http.StatusBadRequest, "missing or malformed jwt") - -// ErrJWTInvalid is error that is returned when middleware could not parse JWT correctly. -var ErrJWTInvalid = echo.NewHTTPError(http.StatusUnauthorized, "invalid or expired jwt") - -// DefaultJWTConfig is the default JWT auth middleware config. -var DefaultJWTConfig = JWTConfig{ - Skipper: DefaultSkipper, - SigningMethod: AlgorithmHS256, - ContextKey: "user", - TokenLookup: "header:" + echo.HeaderAuthorization, - TokenLookupFuncs: nil, - AuthScheme: "Bearer", - Claims: jwt.MapClaims{}, - KeyFunc: nil, -} - -// JWT returns a JSON Web Token (JWT) auth middleware. -// -// For valid token, it sets the user in context and calls next handler. -// For invalid token, it returns "401 - Unauthorized" error. -// For missing token, it returns "400 - Bad Request" error. -// -// See: https://jwt.io/introduction -// See `JWTConfig.TokenLookup` -// -// Deprecated: Please use https://github.com/labstack/echo-jwt instead -func JWT(key interface{}) echo.MiddlewareFunc { - c := DefaultJWTConfig - c.SigningKey = key - return JWTWithConfig(c) -} - -// JWTWithConfig returns a JWT auth middleware with config. -// See: `JWT()`. 
-// -// Deprecated: Please use https://github.com/labstack/echo-jwt instead -func JWTWithConfig(config JWTConfig) echo.MiddlewareFunc { - // Defaults - if config.Skipper == nil { - config.Skipper = DefaultJWTConfig.Skipper - } - if config.SigningKey == nil && len(config.SigningKeys) == 0 && config.KeyFunc == nil && config.ParseTokenFunc == nil { - panic("echo: jwt middleware requires signing key") - } - if config.SigningMethod == "" { - config.SigningMethod = DefaultJWTConfig.SigningMethod - } - if config.ContextKey == "" { - config.ContextKey = DefaultJWTConfig.ContextKey - } - if config.Claims == nil { - config.Claims = DefaultJWTConfig.Claims - } - if config.TokenLookup == "" && len(config.TokenLookupFuncs) == 0 { - config.TokenLookup = DefaultJWTConfig.TokenLookup - } - if config.AuthScheme == "" { - config.AuthScheme = DefaultJWTConfig.AuthScheme - } - if config.KeyFunc == nil { - config.KeyFunc = config.defaultKeyFunc - } - if config.ParseTokenFunc == nil { - config.ParseTokenFunc = config.defaultParseToken - } - - extractors, cErr := createExtractors(config.TokenLookup, config.AuthScheme) - if cErr != nil { - panic(cErr) - } - if len(config.TokenLookupFuncs) > 0 { - extractors = append(config.TokenLookupFuncs, extractors...) - } - - return func(next echo.HandlerFunc) echo.HandlerFunc { - return func(c echo.Context) error { - if config.Skipper(c) { - return next(c) - } - - if config.BeforeFunc != nil { - config.BeforeFunc(c) - } - - var lastExtractorErr error - var lastTokenErr error - for _, extractor := range extractors { - auths, err := extractor(c) - if err != nil { - lastExtractorErr = ErrJWTMissing // backwards compatibility: all extraction errors are same (unlike KeyAuth) - continue - } - for _, auth := range auths { - token, err := config.ParseTokenFunc(auth, c) - if err != nil { - lastTokenErr = err - continue - } - // Store user information from token into context. 
- c.Set(config.ContextKey, token) - if config.SuccessHandler != nil { - config.SuccessHandler(c) - } - return next(c) - } - } - // we are here only when we did not successfully extract or parse any of the tokens - err := lastTokenErr - if err == nil { // prioritize token errors over extracting errors - err = lastExtractorErr - } - if config.ErrorHandler != nil { - return config.ErrorHandler(err) - } - if config.ErrorHandlerWithContext != nil { - tmpErr := config.ErrorHandlerWithContext(err, c) - if config.ContinueOnIgnoredError && tmpErr == nil { - return next(c) - } - return tmpErr - } - - // backwards compatible errors codes - if lastTokenErr != nil { - return &echo.HTTPError{ - Code: ErrJWTInvalid.Code, - Message: ErrJWTInvalid.Message, - Internal: err, - } - } - return err // this is lastExtractorErr value - } - } -} - -func (config *JWTConfig) defaultParseToken(auth string, c echo.Context) (interface{}, error) { - var token *jwt.Token - var err error - // Issue #647, #656 - if _, ok := config.Claims.(jwt.MapClaims); ok { - token, err = jwt.Parse(auth, config.KeyFunc) - } else { - t := reflect.ValueOf(config.Claims).Type().Elem() - claims := reflect.New(t).Interface().(jwt.Claims) - token, err = jwt.ParseWithClaims(auth, claims, config.KeyFunc) - } - if err != nil { - return nil, err - } - if !token.Valid { - return nil, errors.New("invalid token") - } - return token, nil -} - -// defaultKeyFunc returns a signing key of the given token. 
-func (config *JWTConfig) defaultKeyFunc(t *jwt.Token) (interface{}, error) { - // Check the signing method - if t.Method.Alg() != config.SigningMethod { - return nil, fmt.Errorf("unexpected jwt signing method=%v", t.Header["alg"]) - } - if len(config.SigningKeys) > 0 { - if kid, ok := t.Header["kid"].(string); ok { - if key, ok := config.SigningKeys[kid]; ok { - return key, nil - } - } - return nil, fmt.Errorf("unexpected jwt key id=%v", t.Header["kid"]) - } - - return config.SigningKey, nil -} diff --git a/vendor/github.com/labstack/echo/v4/middleware/logger.go b/vendor/github.com/labstack/echo/v4/middleware/logger.go index 910fce8c..59020955 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/logger.go +++ b/vendor/github.com/labstack/echo/v4/middleware/logger.go @@ -5,7 +5,6 @@ package middleware import ( "bytes" - "encoding/json" "io" "strconv" "strings" @@ -18,60 +17,186 @@ import ( ) // LoggerConfig defines the config for Logger middleware. +// +// # Configuration Examples +// +// ## Basic Usage with Default Settings +// +// e.Use(middleware.Logger()) +// +// This uses the default JSON format that logs all common request/response details. 
+// +// ## Custom Simple Format +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: "${time_rfc3339_nano} ${status} ${method} ${uri} ${latency_human}\n", +// })) +// +// ## JSON Format with Custom Fields +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: `{"timestamp":"${time_rfc3339_nano}","level":"info","remote_ip":"${remote_ip}",` + +// `"method":"${method}","uri":"${uri}","status":${status},"latency":"${latency_human}",` + +// `"user_agent":"${user_agent}","error":"${error}"}` + "\n", +// })) +// +// ## Custom Time Format +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: "${time_custom} ${method} ${uri} ${status}\n", +// CustomTimeFormat: "2006-01-02 15:04:05", +// })) +// +// ## Logging Headers and Parameters +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: `{"time":"${time_rfc3339_nano}","method":"${method}","uri":"${uri}",` + +// `"status":${status},"auth":"${header:Authorization}","user":"${query:user}",` + +// `"form_data":"${form:action}","session":"${cookie:session_id}"}` + "\n", +// })) +// +// ## Custom Output (File Logging) +// +// file, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) +// if err != nil { +// log.Fatal(err) +// } +// defer file.Close() +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Output: file, +// })) +// +// ## Custom Tag Function +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: `{"time":"${time_rfc3339_nano}","user_id":"${custom}","method":"${method}"}` + "\n", +// CustomTagFunc: func(c echo.Context, buf *bytes.Buffer) (int, error) { +// userID := getUserIDFromContext(c) // Your custom logic +// return buf.WriteString(strconv.Itoa(userID)) +// }, +// })) +// +// ## Conditional Logging (Skip Certain Requests) +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Skipper: func(c echo.Context) bool { +// // Skip logging for 
health check endpoints +// return c.Request().URL.Path == "/health" || c.Request().URL.Path == "/metrics" +// }, +// })) +// +// ## Integration with External Logging Service +// +// logBuffer := &SyncBuffer{} // Thread-safe buffer for external service +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: `{"timestamp":"${time_rfc3339_nano}","service":"my-api","level":"info",` + +// `"method":"${method}","uri":"${uri}","status":${status},"latency_ms":${latency},` + +// `"remote_ip":"${remote_ip}","user_agent":"${user_agent}","error":"${error}"}` + "\n", +// Output: logBuffer, +// })) +// +// # Available Tags +// +// ## Time Tags +// - time_unix: Unix timestamp (seconds) +// - time_unix_milli: Unix timestamp (milliseconds) +// - time_unix_micro: Unix timestamp (microseconds) +// - time_unix_nano: Unix timestamp (nanoseconds) +// - time_rfc3339: RFC3339 format (2006-01-02T15:04:05Z07:00) +// - time_rfc3339_nano: RFC3339 with nanoseconds +// - time_custom: Uses CustomTimeFormat field +// +// ## Request Information +// - id: Request ID from X-Request-ID header +// - remote_ip: Client IP address (respects proxy headers) +// - uri: Full request URI with query parameters +// - host: Host header value +// - method: HTTP method (GET, POST, etc.) 
+// - path: URL path without query parameters
+// - route: Echo route pattern (e.g., /users/:id)
+// - protocol: HTTP protocol version
+// - referer: Referer header value
+// - user_agent: User-Agent header value
+//
+// ## Response Information
+// - status: HTTP status code
+// - error: Error message if request failed
+// - latency: Request processing time in nanoseconds
+// - latency_human: Human-readable processing time
+// - bytes_in: Request body size in bytes
+// - bytes_out: Response body size in bytes
+//
+// ## Dynamic Tags
+// - header:<name>: Value of specific header (e.g., header:Authorization)
+// - query:<name>: Value of specific query parameter (e.g., query:user_id)
+// - form:<name>: Value of specific form field (e.g., form:username)
+// - cookie:<name>: Value of specific cookie (e.g., cookie:session_id)
+// - custom: Output from CustomTagFunc
+//
+// # Troubleshooting
+//
+// ## Common Issues
+//
+// 1. **Missing logs**: Check if Skipper function is filtering out requests
+// 2. **Invalid JSON**: Ensure CustomTagFunc outputs valid JSON content
+// 3. **Performance issues**: Consider using a buffered writer for high-traffic applications
+// 4. **File permission errors**: Ensure write permissions when logging to files
+//
+// ## Performance Tips
+//
+// - Use time_unix formats for better performance than time_rfc3339
+// - Minimize the number of dynamic tags (header:, query:, form:, cookie:)
+// - Use Skipper to exclude high-frequency, low-value requests (health checks, etc.)
+// - Consider async logging for very high-traffic applications
 type LoggerConfig struct {
 	// Skipper defines a function to skip middleware.
+	// Use this to exclude certain requests from logging (e.g., health checks).
+	//
+	// Example:
+	//  Skipper: func(c echo.Context) bool {
+	//      return c.Request().URL.Path == "/health"
+	//  },
 	Skipper Skipper
-	// Tags to construct the logger format.
+	// Format defines the logging format using template tags.
+ // Tags are enclosed in ${} and replaced with actual values. + // See the detailed tag documentation above for all available options. // - // - time_unix - // - time_unix_milli - // - time_unix_micro - // - time_unix_nano - // - time_rfc3339 - // - time_rfc3339_nano - // - time_custom - // - id (Request ID) - // - remote_ip - // - uri - // - host - // - method - // - path - // - route - // - protocol - // - referer - // - user_agent - // - status - // - error - // - latency (In nanoseconds) - // - latency_human (Human readable) - // - bytes_in (Bytes received) - // - bytes_out (Bytes sent) - // - header: - // - query: - // - form: - // - custom (see CustomTagFunc field) - // - // Example "${remote_ip} ${status}" - // - // Optional. Default value DefaultLoggerConfig.Format. + // Default: JSON format with common fields + // Example: "${time_rfc3339_nano} ${status} ${method} ${uri} ${latency_human}\n" Format string `yaml:"format"` - // Optional. Default value DefaultLoggerConfig.CustomTimeFormat. + // CustomTimeFormat specifies the time format used by ${time_custom} tag. + // Uses Go's reference time: Mon Jan 2 15:04:05 MST 2006 + // + // Default: "2006-01-02 15:04:05.00000" + // Example: "2006-01-02 15:04:05" or "15:04:05.000" CustomTimeFormat string `yaml:"custom_time_format"` - // CustomTagFunc is function called for `${custom}` tag to output user implemented text by writing it to buf. - // Make sure that outputted text creates valid JSON string with other logged tags. - // Optional. + // CustomTagFunc is called when ${custom} tag is encountered. + // Use this to add application-specific information to logs. + // The function should write valid content for your log format. 
+ // + // Example: + // CustomTagFunc: func(c echo.Context, buf *bytes.Buffer) (int, error) { + // userID := getUserFromContext(c) + // return buf.WriteString(`"user_id":"` + userID + `"`) + // }, CustomTagFunc func(c echo.Context, buf *bytes.Buffer) (int, error) - // Output is a writer where logs in JSON format are written. - // Optional. Default value os.Stdout. + // Output specifies where logs are written. + // Can be any io.Writer: files, buffers, network connections, etc. + // + // Default: os.Stdout + // Example: Custom file, syslog, or external logging service Output io.Writer template *fasttemplate.Template colorer *color.Color pool *sync.Pool + timeNow func() time.Time } // DefaultLoggerConfig is the default Logger middleware config. @@ -83,15 +208,62 @@ var DefaultLoggerConfig = LoggerConfig{ `,"bytes_in":${bytes_in},"bytes_out":${bytes_out}}` + "\n", CustomTimeFormat: "2006-01-02 15:04:05.00000", colorer: color.New(), + timeNow: time.Now, } -// Logger returns a middleware that logs HTTP requests. +// Logger returns a middleware that logs HTTP requests using the default configuration. 
+// +// The default format logs requests as JSON with the following fields: +// - time: RFC3339 nano timestamp +// - id: Request ID from X-Request-ID header +// - remote_ip: Client IP address +// - host: Host header +// - method: HTTP method +// - uri: Request URI +// - user_agent: User-Agent header +// - status: HTTP status code +// - error: Error message (if any) +// - latency: Processing time in nanoseconds +// - latency_human: Human-readable processing time +// - bytes_in: Request body size +// - bytes_out: Response body size +// +// Example output: +// +// {"time":"2023-01-15T10:30:45.123456789Z","id":"","remote_ip":"127.0.0.1", +// "host":"localhost:8080","method":"GET","uri":"/users/123","user_agent":"curl/7.81.0", +// "status":200,"error":"","latency":1234567,"latency_human":"1.234567ms", +// "bytes_in":0,"bytes_out":42} +// +// For custom configurations, use LoggerWithConfig instead. +// +// Deprecated: please use middleware.RequestLogger or middleware.RequestLoggerWithConfig instead. func Logger() echo.MiddlewareFunc { return LoggerWithConfig(DefaultLoggerConfig) } -// LoggerWithConfig returns a Logger middleware with config. -// See: `Logger()`. +// LoggerWithConfig returns a Logger middleware with custom configuration. +// +// This function allows you to customize all aspects of request logging including: +// - Log format and fields +// - Output destination +// - Time formatting +// - Custom tags and logic +// - Request filtering +// +// See LoggerConfig documentation for detailed configuration examples and options. +// +// Example: +// +// e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ +// Format: "${time_rfc3339} ${status} ${method} ${uri} ${latency_human}\n", +// Output: customLogWriter, +// Skipper: func(c echo.Context) bool { +// return c.Request().URL.Path == "/health" +// }, +// })) +// +// Deprecated: please use middleware.RequestLoggerWithConfig instead. 
func LoggerWithConfig(config LoggerConfig) echo.MiddlewareFunc { // Defaults if config.Skipper == nil { @@ -100,9 +272,18 @@ func LoggerWithConfig(config LoggerConfig) echo.MiddlewareFunc { if config.Format == "" { config.Format = DefaultLoggerConfig.Format } + writeString := func(buf *bytes.Buffer, in string) (int, error) { return buf.WriteString(in) } + if config.Format[0] == '{' { // format looks like JSON, so we need to escape invalid characters + writeString = writeJSONSafeString + } + if config.Output == nil { config.Output = DefaultLoggerConfig.Output } + timeNow := DefaultLoggerConfig.timeNow + if config.timeNow != nil { + timeNow = config.timeNow + } config.template = fasttemplate.New(config.Format, "${", "}") config.colorer = color.New() @@ -138,49 +319,47 @@ func LoggerWithConfig(config LoggerConfig) echo.MiddlewareFunc { } return config.CustomTagFunc(c, buf) case "time_unix": - return buf.WriteString(strconv.FormatInt(time.Now().Unix(), 10)) + return buf.WriteString(strconv.FormatInt(timeNow().Unix(), 10)) case "time_unix_milli": - // go 1.17 or later, it supports time#UnixMilli() - return buf.WriteString(strconv.FormatInt(time.Now().UnixNano()/1000000, 10)) + return buf.WriteString(strconv.FormatInt(timeNow().UnixMilli(), 10)) case "time_unix_micro": - // go 1.17 or later, it supports time#UnixMicro() - return buf.WriteString(strconv.FormatInt(time.Now().UnixNano()/1000, 10)) + return buf.WriteString(strconv.FormatInt(timeNow().UnixMicro(), 10)) case "time_unix_nano": - return buf.WriteString(strconv.FormatInt(time.Now().UnixNano(), 10)) + return buf.WriteString(strconv.FormatInt(timeNow().UnixNano(), 10)) case "time_rfc3339": - return buf.WriteString(time.Now().Format(time.RFC3339)) + return buf.WriteString(timeNow().Format(time.RFC3339)) case "time_rfc3339_nano": - return buf.WriteString(time.Now().Format(time.RFC3339Nano)) + return buf.WriteString(timeNow().Format(time.RFC3339Nano)) case "time_custom": - return 
buf.WriteString(time.Now().Format(config.CustomTimeFormat)) + return buf.WriteString(timeNow().Format(config.CustomTimeFormat)) case "id": id := req.Header.Get(echo.HeaderXRequestID) if id == "" { id = res.Header().Get(echo.HeaderXRequestID) } - return buf.WriteString(id) + return writeString(buf, id) case "remote_ip": - return buf.WriteString(c.RealIP()) + return writeString(buf, c.RealIP()) case "host": - return buf.WriteString(req.Host) + return writeString(buf, req.Host) case "uri": - return buf.WriteString(req.RequestURI) + return writeString(buf, req.RequestURI) case "method": - return buf.WriteString(req.Method) + return writeString(buf, req.Method) case "path": p := req.URL.Path if p == "" { p = "/" } - return buf.WriteString(p) + return writeString(buf, p) case "route": - return buf.WriteString(c.Path()) + return writeString(buf, c.Path()) case "protocol": - return buf.WriteString(req.Proto) + return writeString(buf, req.Proto) case "referer": - return buf.WriteString(req.Referer()) + return writeString(buf, req.Referer()) case "user_agent": - return buf.WriteString(req.UserAgent()) + return writeString(buf, req.UserAgent()) case "status": n := res.Status s := config.colorer.Green(n) @@ -195,10 +374,7 @@ func LoggerWithConfig(config LoggerConfig) echo.MiddlewareFunc { return buf.WriteString(s) case "error": if err != nil { - // Error may contain invalid JSON e.g. 
`"` - b, _ := json.Marshal(err.Error()) - b = b[1 : len(b)-1] - return buf.Write(b) + return writeJSONSafeString(buf, err.Error()) } case "latency": l := stop.Sub(start) @@ -210,17 +386,17 @@ func LoggerWithConfig(config LoggerConfig) echo.MiddlewareFunc { if cl == "" { cl = "0" } - return buf.WriteString(cl) + return writeString(buf, cl) case "bytes_out": return buf.WriteString(strconv.FormatInt(res.Size, 10)) default: switch { case strings.HasPrefix(tag, "header:"): - return buf.Write([]byte(c.Request().Header.Get(tag[7:]))) + return writeString(buf, c.Request().Header.Get(tag[7:])) case strings.HasPrefix(tag, "query:"): - return buf.Write([]byte(c.QueryParam(tag[6:]))) + return writeString(buf, c.QueryParam(tag[6:])) case strings.HasPrefix(tag, "form:"): - return buf.Write([]byte(c.FormValue(tag[5:]))) + return writeString(buf, c.FormValue(tag[5:])) case strings.HasPrefix(tag, "cookie:"): cookie, err := c.Cookie(tag[7:]) if err == nil { diff --git a/vendor/github.com/labstack/echo/v4/middleware/logger_strings.go b/vendor/github.com/labstack/echo/v4/middleware/logger_strings.go new file mode 100644 index 00000000..8476cb04 --- /dev/null +++ b/vendor/github.com/labstack/echo/v4/middleware/logger_strings.go @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: BSD-3-Clause +// SPDX-FileCopyrightText: Copyright 2010 The Go Authors +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// +// Go LICENSE https://raw.githubusercontent.com/golang/go/36bca3166e18db52687a4d91ead3f98ffe6d00b8/LICENSE +/** +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/
+
+package middleware
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+// This function is modified copy from Go standard library encoding/json/encode.go `appendString` function
+// Source: https://github.com/golang/go/blob/36bca3166e18db52687a4d91ead3f98ffe6d00b8/src/encoding/json/encode.go#L999
+func writeJSONSafeString(buf *bytes.Buffer, src string) (int, error) {
+	const hex = "0123456789abcdef"
+
+	written := 0
+	start := 0
+	for i := 0; i < len(src); {
+		if b := src[i]; b < utf8.RuneSelf {
+			if safeSet[b] {
+				i++
+				continue
+			}
+
+			n, err := buf.Write([]byte(src[start:i]))
+			written += n
+			if err != nil {
+				return written, err
+			}
+			switch b {
+			case '\\', '"':
+				n, err := buf.Write([]byte{'\\', b})
+				written += n
+				if err != nil {
+					return written, err
+				}
+			case '\b':
+				n, err := buf.Write([]byte{'\\', 'b'})
+				written += n
+				if err != nil {
+					return written, err
+				}
+			case '\f':
+				n, err := buf.Write([]byte{'\\', 'f'})
+				written += n
+				if err != nil {
+					return written, err
+				}
+			case '\n':
+				n, err := buf.Write([]byte{'\\', 'n'})
+				written += n
+				if err != nil {
+					return written, err
+				}
+			case '\r':
+				n, err := buf.Write([]byte{'\\', 'r'})
+				written += n
+				if err != nil {
+					return written, err
+				}
+			case '\t':
+				n, err := buf.Write([]byte{'\\', 't'})
+				written += n
+				if err != nil {
+					return written, err
+				}
+			default:
+				// This encodes bytes < 0x20 except for \b, \f, \n, \r and \t.
+ n, err := buf.Write([]byte{'\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]}) + written += n + if err != nil { + return written, err + } + } + i++ + start = i + continue + } + srcN := min(len(src)-i, utf8.UTFMax) + c, size := utf8.DecodeRuneInString(src[i : i+srcN]) + if c == utf8.RuneError && size == 1 { + n, err := buf.Write([]byte(src[start:i])) + written += n + if err != nil { + return written, err + } + n, err = buf.Write([]byte(`\ufffd`)) + written += n + if err != nil { + return written, err + } + i += size + start = i + continue + } + i += size + } + n, err := buf.Write([]byte(src[start:])) + written += n + return written, err +} + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). +var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, 
+ 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} diff --git a/vendor/github.com/labstack/echo/v4/middleware/middleware.go b/vendor/github.com/labstack/echo/v4/middleware/middleware.go index 6f33cc5c..164e52b4 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/middleware.go +++ b/vendor/github.com/labstack/echo/v4/middleware/middleware.go @@ -88,3 +88,13 @@ func rewriteURL(rewriteRegex map[*regexp.Regexp]string, req *http.Request) error func DefaultSkipper(echo.Context) bool { return false } + +func toMiddlewareOrPanic(config interface { + ToMiddleware() (echo.MiddlewareFunc, error) +}) echo.MiddlewareFunc { + mw, err := config.ToMiddleware() + if err != nil { + panic(err) + } + return mw +} diff --git a/vendor/github.com/labstack/echo/v4/middleware/proxy.go b/vendor/github.com/labstack/echo/v4/middleware/proxy.go index 495970ac..f2687007 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/proxy.go +++ b/vendor/github.com/labstack/echo/v4/middleware/proxy.go @@ -5,6 +5,7 @@ package middleware import ( "context" + "crypto/tls" "fmt" "io" "math/rand" @@ -130,7 +131,21 @@ var DefaultProxyConfig = ProxyConfig{ ContextKey: "target", } -func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler { +func proxyRaw(t *ProxyTarget, c echo.Context, config ProxyConfig) http.Handler { + var dialFunc func(ctx context.Context, network, addr string) (net.Conn, error) + if transport, ok := config.Transport.(*http.Transport); ok { + if transport.TLSClientConfig != nil { + d := tls.Dialer{ + Config: transport.TLSClientConfig, + } + dialFunc = d.DialContext + } + } + if dialFunc == nil { + var d net.Dialer + dialFunc = d.DialContext + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { in, _, err := c.Response().Hijack() if err != nil { @@ -138,8 +153,7 @@ 
func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler { return } defer in.Close() - - out, err := net.Dial("tcp", t.URL.Host) + out, err := dialFunc(c.Request().Context(), "tcp", t.URL.Host) if err != nil { c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", err, t.URL))) return @@ -155,15 +169,21 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler { errCh := make(chan error, 2) cp := func(dst io.Writer, src io.Reader) { - _, err = io.Copy(dst, src) - errCh <- err + _, copyErr := io.Copy(dst, src) + errCh <- copyErr } go cp(out, in) go cp(in, out) - err = <-errCh - if err != nil && err != io.EOF { - c.Set("_error", fmt.Errorf("proxy raw, copy body error=%w, url=%s", err, t.URL)) + + // Wait for BOTH goroutines to complete + err1 := <-errCh + err2 := <-errCh + + if err1 != nil && err1 != io.EOF { + c.Set("_error", fmt.Errorf("proxy raw, copy body error=%w, url=%s", err1, t.URL)) + } else if err2 != nil && err2 != io.EOF { + c.Set("_error", fmt.Errorf("proxy raw, copy body error=%w, url=%s", err2, t.URL)) } }) } @@ -365,7 +385,7 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc { // Proxy switch { case c.IsWebSocket(): - proxyRaw(tgt, c).ServeHTTP(res, req) + proxyRaw(tgt, c, config).ServeHTTP(res, req) default: // even SSE requests proxyHTTP(tgt, c, config).ServeHTTP(res, req) } diff --git a/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go b/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go index d4724fd2..2746a3de 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go +++ b/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go @@ -4,6 +4,7 @@ package middleware import ( + "math" "net/http" "sync" "time" @@ -191,7 +192,7 @@ NewRateLimiterMemoryStoreWithConfig returns an instance of RateLimiterMemoryStor with the provided configuration. Rate must be provided. 
Burst will be set to the rounded down value of the configured rate if not provided or set to 0. -The build-in memory store is usually capable for modest loads. For higher loads other +The built-in memory store is usually capable for modest loads. For higher loads other store implementations should be considered. Characteristics: @@ -215,7 +216,7 @@ func NewRateLimiterMemoryStoreWithConfig(config RateLimiterMemoryStoreConfig) (s store.expiresIn = DefaultRateLimiterMemoryStoreConfig.ExpiresIn } if config.Burst == 0 { - store.burst = int(config.Rate) + store.burst = int(math.Max(1, math.Ceil(float64(config.Rate)))) } store.visitors = make(map[string]*Visitor) store.timeNow = time.Now @@ -249,8 +250,9 @@ func (store *RateLimiterMemoryStore) Allow(identifier string) (bool, error) { if now.Sub(store.lastCleanup) > store.expiresIn { store.cleanupStaleVisitors() } + allowed := limiter.AllowN(now, 1) store.mutex.Unlock() - return limiter.AllowN(store.timeNow(), 1), nil + return allowed, nil } /* diff --git a/vendor/github.com/labstack/echo/v4/middleware/request_logger.go b/vendor/github.com/labstack/echo/v4/middleware/request_logger.go index 7c18200b..211abf46 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/request_logger.go +++ b/vendor/github.com/labstack/echo/v4/middleware/request_logger.go @@ -4,7 +4,9 @@ package middleware import ( + "context" "errors" + "log/slog" "net/http" "time" @@ -247,6 +249,72 @@ func RequestLoggerWithConfig(config RequestLoggerConfig) echo.MiddlewareFunc { return mw } +// RequestLogger returns a RequestLogger middleware with default configuration which +// uses default slog.slog logger. 
+// +// To customize slog output format replace slog default logger: +// For JSON format: `slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, nil)))` +func RequestLogger() echo.MiddlewareFunc { + config := RequestLoggerConfig{ + LogLatency: true, + LogProtocol: false, + LogRemoteIP: true, + LogHost: true, + LogMethod: true, + LogURI: true, + LogURIPath: false, + LogRoutePath: false, + LogRequestID: true, + LogReferer: false, + LogUserAgent: true, + LogStatus: true, + LogError: true, + LogContentLength: true, + LogResponseSize: true, + LogHeaders: nil, + LogQueryParams: nil, + LogFormValues: nil, + HandleError: true, // forwards error to the global error handler, so it can decide appropriate status code + LogValuesFunc: func(c echo.Context, v RequestLoggerValues) error { + if v.Error == nil { + slog.LogAttrs(context.Background(), slog.LevelInfo, "REQUEST", + slog.String("method", v.Method), + slog.String("uri", v.URI), + slog.Int("status", v.Status), + slog.Duration("latency", v.Latency), + slog.String("host", v.Host), + slog.String("bytes_in", v.ContentLength), + slog.Int64("bytes_out", v.ResponseSize), + slog.String("user_agent", v.UserAgent), + slog.String("remote_ip", v.RemoteIP), + slog.String("request_id", v.RequestID), + ) + } else { + slog.LogAttrs(context.Background(), slog.LevelError, "REQUEST_ERROR", + slog.String("method", v.Method), + slog.String("uri", v.URI), + slog.Int("status", v.Status), + slog.Duration("latency", v.Latency), + slog.String("host", v.Host), + slog.String("bytes_in", v.ContentLength), + slog.Int64("bytes_out", v.ResponseSize), + slog.String("user_agent", v.UserAgent), + slog.String("remote_ip", v.RemoteIP), + slog.String("request_id", v.RequestID), + + slog.String("error", v.Error.Error()), + ) + } + return nil + }, + } + mw, err := config.ToMiddleware() + if err != nil { + panic(err) + } + return mw +} + // ToMiddleware converts RequestLoggerConfig into middleware or returns an error for invalid configuration. 
func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) { if config.Skipper == nil { diff --git a/vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.19.go b/vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.19.go deleted file mode 100644 index ddf6b64c..00000000 --- a/vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.19.go +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build !go1.20 - -package middleware - -import ( - "bufio" - "fmt" - "net" - "net/http" -) - -// TODO: remove when Go 1.23 is released and we do not support 1.19 anymore -func responseControllerFlush(rw http.ResponseWriter) error { - for { - switch t := rw.(type) { - case interface{ FlushError() error }: - return t.FlushError() - case http.Flusher: - t.Flush() - return nil - case interface{ Unwrap() http.ResponseWriter }: - rw = t.Unwrap() - default: - return fmt.Errorf("%w", http.ErrNotSupported) - } - } -} - -// TODO: remove when Go 1.23 is released and we do not support 1.19 anymore -func responseControllerHijack(rw http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - for { - switch t := rw.(type) { - case http.Hijacker: - return t.Hijack() - case interface{ Unwrap() http.ResponseWriter }: - rw = t.Unwrap() - default: - return nil, nil, fmt.Errorf("%w", http.ErrNotSupported) - } - } -} diff --git a/vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.20.go b/vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.20.go deleted file mode 100644 index bc03059b..00000000 --- a/vendor/github.com/labstack/echo/v4/middleware/responsecontroller_1.20.go +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build go1.20 - -package middleware - -import ( - "bufio" - "net" - "net/http" -) - -func responseControllerFlush(rw 
http.ResponseWriter) error { - return http.NewResponseController(rw).Flush() -} - -func responseControllerHijack(rw http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - return http.NewResponseController(rw).Hijack() -} diff --git a/vendor/github.com/labstack/echo/v4/middleware/static.go b/vendor/github.com/labstack/echo/v4/middleware/static.go index 1016f1b0..2d946c17 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/static.go +++ b/vendor/github.com/labstack/echo/v4/middleware/static.go @@ -174,6 +174,12 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc { if err != nil { return } + // Security: We use path.Clean() (not filepath.Clean()) because: + // 1. HTTP URLs always use forward slashes, regardless of server OS + // 2. path.Clean() provides platform-independent behavior for URL paths + // 3. The "/" prefix forces absolute path interpretation, removing ".." components + // 4. Backslashes are treated as literal characters (not path separators), preventing traversal + // See static_windows.go for Go 1.20+ filepath.Clean compatibility notes name := path.Join(config.Root, path.Clean("/"+p)) // "/"+ for security if config.IgnoreBase { diff --git a/vendor/github.com/labstack/echo/v4/middleware/timeout.go b/vendor/github.com/labstack/echo/v4/middleware/timeout.go index c2aebef3..c0a77a4b 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/timeout.go +++ b/vendor/github.com/labstack/echo/v4/middleware/timeout.go @@ -59,6 +59,12 @@ import ( // // TimeoutConfig defines the config for Timeout middleware. +// +// Deprecated: Use ContextTimeoutConfig with ContextTimeout or ContextTimeoutWithConfig instead. +// The Timeout middleware has architectural issues that cause data races due to response writer +// manipulation across goroutines. It must be the first middleware in the chain, making it fragile. 
+// The ContextTimeout middleware provides timeout functionality using Go's context mechanism, +// which is race-free and can be placed anywhere in the middleware chain. type TimeoutConfig struct { // Skipper defines a function to skip middleware. Skipper Skipper @@ -89,11 +95,38 @@ var DefaultTimeoutConfig = TimeoutConfig{ // Timeout returns a middleware which returns error (503 Service Unavailable error) to client immediately when handler // call runs for longer than its time limit. NB: timeout does not stop handler execution. +// +// Deprecated: Use ContextTimeout instead. This middleware has known data race issues due to response writer +// manipulation. See https://github.com/labstack/echo/blob/master/middleware/context_timeout.go for the +// recommended alternative. +// +// Example migration: +// +// // Before: +// e.Use(middleware.Timeout()) +// +// // After: +// e.Use(middleware.ContextTimeout(30 * time.Second)) func Timeout() echo.MiddlewareFunc { return TimeoutWithConfig(DefaultTimeoutConfig) } // TimeoutWithConfig returns a Timeout middleware with config or panics on invalid configuration. +// +// Deprecated: Use ContextTimeoutWithConfig instead. This middleware has architectural data race issues. +// See the ContextTimeout middleware for a race-free alternative that uses Go's context mechanism. +// +// Example migration: +// +// // Before: +// e.Use(middleware.TimeoutWithConfig(middleware.TimeoutConfig{ +// Timeout: 30 * time.Second, +// })) +// +// // After: +// e.Use(middleware.ContextTimeoutWithConfig(middleware.ContextTimeoutConfig{ +// Timeout: 30 * time.Second, +// })) func TimeoutWithConfig(config TimeoutConfig) echo.MiddlewareFunc { mw, err := config.ToMiddleware() if err != nil { @@ -103,6 +136,8 @@ func TimeoutWithConfig(config TimeoutConfig) echo.MiddlewareFunc { } // ToMiddleware converts Config to middleware or returns an error for invalid configuration +// +// Deprecated: Use ContextTimeoutConfig.ToMiddleware instead. 
func (config TimeoutConfig) ToMiddleware() (echo.MiddlewareFunc, error) { if config.Skipper == nil { config.Skipper = DefaultTimeoutConfig.Skipper diff --git a/vendor/github.com/labstack/echo/v4/middleware/util.go b/vendor/github.com/labstack/echo/v4/middleware/util.go index 09428eb0..5813990a 100644 --- a/vendor/github.com/labstack/echo/v4/middleware/util.go +++ b/vendor/github.com/labstack/echo/v4/middleware/util.go @@ -6,7 +6,9 @@ package middleware import ( "bufio" "crypto/rand" + "fmt" "io" + "net/url" "strings" "sync" ) @@ -101,3 +103,26 @@ func randomString(length uint8) string { } } } + +func validateOrigins(origins []string, what string) error { + for _, o := range origins { + if err := validateOrigin(o, what); err != nil { + return err + } + } + return nil +} + +func validateOrigin(origin string, what string) error { + u, err := url.Parse(origin) + if err != nil { + return fmt.Errorf("can not parse %s: %w", what, err) + } + if u.Scheme == "" || u.Host == "" { + return fmt.Errorf("%s is missing scheme or host: %s", what, origin) + } + if u.Path != "" || u.RawQuery != "" || u.Fragment != "" { + return fmt.Errorf("%s can not have path, query, and fragments: %s", what, origin) + } + return nil +} diff --git a/vendor/github.com/labstack/echo/v4/renderer.go b/vendor/github.com/labstack/echo/v4/renderer.go new file mode 100644 index 00000000..44e038f3 --- /dev/null +++ b/vendor/github.com/labstack/echo/v4/renderer.go @@ -0,0 +1,29 @@ +package echo + +import "io" + +// Renderer is the interface that wraps the Render function. +type Renderer interface { + Render(io.Writer, string, interface{}, Context) error +} + +// TemplateRenderer is helper to ease creating renderers for `html/template` and `text/template` packages. 
+// Example usage: +// +// e.Renderer = &echo.TemplateRenderer{ +// Template: template.Must(template.ParseGlob("templates/*.html")), +// } +// +// e.Renderer = &echo.TemplateRenderer{ +// Template: template.Must(template.New("hello").Parse("Hello, {{.}}!")), +// } +type TemplateRenderer struct { + Template interface { + ExecuteTemplate(wr io.Writer, name string, data any) error + } +} + +// Render renders the template with given data. +func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c Context) error { + return t.Template.ExecuteTemplate(w, name, data) +} diff --git a/vendor/github.com/labstack/echo/v4/response.go b/vendor/github.com/labstack/echo/v4/response.go index a795ce36..0c61c973 100644 --- a/vendor/github.com/labstack/echo/v4/response.go +++ b/vendor/github.com/labstack/echo/v4/response.go @@ -6,6 +6,7 @@ package echo import ( "bufio" "errors" + "fmt" "net" "net/http" ) @@ -14,10 +15,10 @@ import ( // by an HTTP handler to construct an HTTP response. // See: https://golang.org/pkg/net/http/#ResponseWriter type Response struct { + Writer http.ResponseWriter echo *Echo beforeFuncs []func() afterFuncs []func() - Writer http.ResponseWriter Status int Size int64 Committed bool @@ -86,9 +87,9 @@ func (r *Response) Write(b []byte) (n int, err error) { // buffered data to the client. // See [http.Flusher](https://golang.org/pkg/net/http/#Flusher) func (r *Response) Flush() { - err := responseControllerFlush(r.Writer) + err := http.NewResponseController(r.Writer).Flush() if err != nil && errors.Is(err, http.ErrNotSupported) { - panic(errors.New("response writer flushing is not supported")) + panic(fmt.Errorf("echo: response writer %T does not support flushing (http.Flusher interface)", r.Writer)) } } @@ -96,7 +97,7 @@ func (r *Response) Flush() { // take over the connection. 
// See [http.Hijacker](https://golang.org/pkg/net/http/#Hijacker) func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return responseControllerHijack(r.Writer) + return http.NewResponseController(r.Writer).Hijack() } // Unwrap returns the original http.ResponseWriter. diff --git a/vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go b/vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go deleted file mode 100644 index 782dab3a..00000000 --- a/vendor/github.com/labstack/echo/v4/responsecontroller_1.19.go +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build !go1.20 - -package echo - -import ( - "bufio" - "fmt" - "net" - "net/http" -) - -// TODO: remove when Go 1.23 is released and we do not support 1.19 anymore -func responseControllerFlush(rw http.ResponseWriter) error { - for { - switch t := rw.(type) { - case interface{ FlushError() error }: - return t.FlushError() - case http.Flusher: - t.Flush() - return nil - case interface{ Unwrap() http.ResponseWriter }: - rw = t.Unwrap() - default: - return fmt.Errorf("%w", http.ErrNotSupported) - } - } -} - -// TODO: remove when Go 1.23 is released and we do not support 1.19 anymore -func responseControllerHijack(rw http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - for { - switch t := rw.(type) { - case http.Hijacker: - return t.Hijack() - case interface{ Unwrap() http.ResponseWriter }: - rw = t.Unwrap() - default: - return nil, nil, fmt.Errorf("%w", http.ErrNotSupported) - } - } -} diff --git a/vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go b/vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go deleted file mode 100644 index 6d77c07f..00000000 --- a/vendor/github.com/labstack/echo/v4/responsecontroller_1.20.go +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: © 2015 LabStack LLC and Echo contributors - -//go:build 
go1.20 - -package echo - -import ( - "bufio" - "net" - "net/http" -) - -func responseControllerFlush(rw http.ResponseWriter) error { - return http.NewResponseController(rw).Flush() -} - -func responseControllerHijack(rw http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - return http.NewResponseController(rw).Hijack() -} diff --git a/vendor/github.com/labstack/echo/v4/router.go b/vendor/github.com/labstack/echo/v4/router.go index 03267315..912cfeac 100644 --- a/vendor/github.com/labstack/echo/v4/router.go +++ b/vendor/github.com/labstack/echo/v4/router.go @@ -18,32 +18,31 @@ type Router struct { } type node struct { - kind kind - label byte - prefix string - parent *node - staticChildren children - originalPath string - methods *routeMethods - paramChild *node - anyChild *node - paramsCount int + methods *routeMethods + parent *node + paramChild *node + anyChild *node + // notFoundHandler is handler registered with RouteNotFound method and is executed for 404 cases + notFoundHandler *routeMethod + prefix string + originalPath string + staticChildren children + paramsCount int + label byte + kind kind // isLeaf indicates that node does not have child routes isLeaf bool // isHandler indicates that node has at least one handler registered to it isHandler bool - - // notFoundHandler is handler registered with RouteNotFound method and is executed for 404 cases - notFoundHandler *routeMethod } type kind uint8 type children []*node type routeMethod struct { + handler HandlerFunc ppath string pnames []string - handler HandlerFunc } type routeMethods struct { @@ -242,18 +241,18 @@ func (r *Router) insert(method, path string, h HandlerFunc) { if i == lcpIndex { // path node is last fragment of route path. ie. 
`/users/:id` - r.insertNode(method, path[:i], paramKind, routeMethod{ppath, pnames, h}) + r.insertNode(method, path[:i], paramKind, routeMethod{ppath: ppath, pnames: pnames, handler: h}) } else { r.insertNode(method, path[:i], paramKind, routeMethod{}) } } else if path[i] == '*' { r.insertNode(method, path[:i], staticKind, routeMethod{}) pnames = append(pnames, "*") - r.insertNode(method, path[:i+1], anyKind, routeMethod{ppath, pnames, h}) + r.insertNode(method, path[:i+1], anyKind, routeMethod{ppath: ppath, pnames: pnames, handler: h}) } } - r.insertNode(method, path, staticKind, routeMethod{ppath, pnames, h}) + r.insertNode(method, path, staticKind, routeMethod{ppath: ppath, pnames: pnames, handler: h}) } func (r *Router) insertNode(method, path string, t kind, rm routeMethod) { @@ -693,7 +692,7 @@ func (r *Router) Find(method, path string, c Context) { // update indexes/search in case we need to backtrack when no handler match is found paramIndex++ - searchIndex += +len(search) + searchIndex += len(search) search = "" if h := currentNode.findMethod(method); h != nil { diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go deleted file mode 100644 index 416d1bbb..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build appengine -// +build appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable returns new instance of Writer which handles escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. -func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
-func NewColorableStderr() io.Writer { - return os.Stderr -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 766d9460..c1a78aa9 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -//go:build !windows && !appengine -// +build !windows,!appengine +//go:build !windows || appengine +// +build !windows appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 1846ad5a..2df7b859 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" "sync" - "syscall" + syscall "golang.org/x/sys/windows" "unsafe" "github.com/mattn/go-isatty" @@ -73,7 +73,7 @@ type consoleCursorInfo struct { } var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") + kernel32 = syscall.NewLazySystemDLL("kernel32.dll") procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") @@ -87,8 +87,8 @@ var ( procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) -// Writer provides colorable Writer to the console -type Writer struct { +// writer provides colorable Writer to the console +type writer struct { out io.Writer handle syscall.Handle althandle syscall.Handle @@ -98,7 +98,7 @@ type Writer struct { mutex sync.Mutex } -// NewColorable returns new instance of Writer which handles escape sequence from File. 
+// NewColorable returns new instance of writer which handles escape sequence from File. func NewColorable(file *os.File) io.Writer { if file == nil { panic("nil passed instead of *os.File to NewColorable()") @@ -112,17 +112,17 @@ func NewColorable(file *os.File) io.Writer { var csbi consoleScreenBufferInfo handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + return &writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} } return file } -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +// NewColorableStdout returns new instance of writer which handles escape sequence for stdout. func NewColorableStdout() io.Writer { return NewColorable(os.Stdout) } -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +// NewColorableStderr returns new instance of writer which handles escape sequence for stderr. 
func NewColorableStderr() io.Writer { return NewColorable(os.Stderr) } @@ -434,7 +434,7 @@ func atoiWithDefault(s string, def int) (int, error) { } // Write writes data on console -func (w *Writer) Write(data []byte) (n int, err error) { +func (w *writer) Write(data []byte) (n int, err error) { w.mutex.Lock() defer w.mutex.Unlock() var csbi consoleScreenBufferInfo @@ -560,7 +560,7 @@ loop: } procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } @@ -569,7 +569,7 @@ loop: csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'F': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } diff --git a/vendor/github.com/moby/sys/user/LICENSE b/vendor/github.com/moby/sys/user/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/moby/sys/user/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go rename to vendor/github.com/moby/sys/user/lookup_unix.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/moby/sys/user/user.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/user.go rename to vendor/github.com/moby/sys/user/user.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go rename to vendor/github.com/moby/sys/user/user_fuzzer.go diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE index 5c97abce..c29775c0 100644 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ b/vendor/github.com/opencontainers/runc/NOTICE @@ -8,9 +8,9 @@ The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. +United States and other governments. It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. 
+violate applicable laws. For more information, please see http://www.bis.doc.gov diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go new file mode 100644 index 00000000..c6cd4434 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_deprecated.go @@ -0,0 +1,81 @@ +package user + +import ( + "io" + + "github.com/moby/sys/user" +) + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (user.User, error) { + return user.LookupUser(username) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (user.User, error) { //nolint:revive // ignore var-naming: func LookupUid should be LookupUID + return user.LookupUid(uid) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (user.Group, error) { + return user.LookupGroup(groupname) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. 
+func LookupGid(gid int) (user.Group, error) { + return user.LookupGid(gid) +} + +func GetPasswdPath() (string, error) { + return user.GetPasswdPath() +} + +func GetPasswd() (io.ReadCloser, error) { + return user.GetPasswd() +} + +func GetGroupPath() (string, error) { + return user.GetGroupPath() +} + +func GetGroup() (io.ReadCloser, error) { + return user.GetGroup() +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (user.User, error) { + return user.CurrentUser() +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (user.Group, error) { + return user.CurrentGroup() +} + +func CurrentUserSubUIDs() ([]user.SubID, error) { + return user.CurrentUserSubUIDs() +} + +func CurrentUserSubGIDs() ([]user.SubID, error) { + return user.CurrentUserSubGIDs() +} + +func CurrentProcessUIDMap() ([]user.IDMap, error) { + return user.CurrentProcessUIDMap() +} + +func CurrentProcessGIDMap() ([]user.IDMap, error) { + return user.CurrentProcessGIDMap() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go new file mode 100644 index 00000000..3c29f3d1 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_deprecated.go @@ -0,0 +1,146 @@ +// Package user is an alias for [github.com/moby/sys/user]. +// +// Deprecated: use [github.com/moby/sys/user]. +package user + +import ( + "io" + + "github.com/moby/sys/user" +) + +var ( + // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. 
+ ErrNoPasswdEntries = user.ErrNoPasswdEntries + // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. + ErrNoGroupEntries = user.ErrNoGroupEntries + // ErrRange is returned if a UID or GID is outside of the valid range. + ErrRange = user.ErrRange +) + +type ( + User = user.User + + Group = user.Group + + // SubID represents an entry in /etc/sub{u,g}id. + SubID = user.SubID + + // IDMap represents an entry in /proc/PID/{u,g}id_map. + IDMap = user.IDMap + + ExecUser = user.ExecUser +) + +func ParsePasswdFile(path string) ([]user.User, error) { + return user.ParsePasswdFile(path) +} + +func ParsePasswd(passwd io.Reader) ([]user.User, error) { + return user.ParsePasswd(passwd) +} + +func ParsePasswdFileFilter(path string, filter func(user.User) bool) ([]user.User, error) { + return user.ParsePasswdFileFilter(path, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(user.User) bool) ([]user.User, error) { + return user.ParsePasswdFilter(r, filter) +} + +func ParseGroupFile(path string) ([]user.Group, error) { + return user.ParseGroupFile(path) +} + +func ParseGroup(group io.Reader) ([]user.Group, error) { + return user.ParseGroup(group) +} + +func ParseGroupFileFilter(path string, filter func(user.Group) bool) ([]user.Group, error) { + return user.ParseGroupFileFilter(path, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(user.Group) bool) ([]user.Group, error) { + return user.ParseGroupFilter(r, filter) +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. 
+func GetExecUserPath(userSpec string, defaults *user.ExecUser, passwdPath, groupPath string) (*user.ExecUser, error) { + return user.GetExecUserPath(userSpec, defaults, passwdPath, groupPath) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// - "" +// - "user" +// - "uid" +// - "user:group" +// - "uid:gid +// - "user:gid" +// - "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *user.ExecUser, passwd, group io.Reader) (*user.ExecUser, error) { + return user.GetExecUser(userSpec, defaults, passwd, group) +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + return user.GetAdditionalGroups(additionalGroups, group) +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. 
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + return user.GetAdditionalGroupsPath(additionalGroups, groupPath) +} + +func ParseSubIDFile(path string) ([]user.SubID, error) { + return user.ParseSubIDFile(path) +} + +func ParseSubID(subid io.Reader) ([]user.SubID, error) { + return user.ParseSubID(subid) +} + +func ParseSubIDFileFilter(path string, filter func(user.SubID) bool) ([]user.SubID, error) { + return user.ParseSubIDFileFilter(path, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(user.SubID) bool) ([]user.SubID, error) { + return user.ParseSubIDFilter(r, filter) +} + +func ParseIDMapFile(path string) ([]user.IDMap, error) { + return user.ParseIDMapFile(path) +} + +func ParseIDMap(r io.Reader) ([]user.IDMap, error) { + return user.ParseIDMap(r) +} + +func ParseIDMapFileFilter(path string, filter func(user.IDMap) bool) ([]user.IDMap, error) { + return user.ParseIDMapFileFilter(path, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(user.IDMap) bool) ([]user.IDMap, error) { + return user.ParseIDMapFilter(r, filter) +} diff --git a/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md b/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md index b2e9dc23..0e0405fc 100644 --- a/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md +++ b/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md @@ -10,7 +10,7 @@ - [FAQ](#faq) - [How can I contribute?](#how-can-i-contribute) - [Communication](#communication) -- [Contribute examples](#contribute-examples) +- [Contribute examples or community projects](#contribute-examples-or-community-projects) - [Contribute code](#contribute-code) - [Contribute documentation](#contribute-documentation) - [Disclosing vulnerabilities](#disclosing-vulnerabilities) @@ -67,7 +67,7 @@ or the [Ory Chat](https://www.ory.sh/chat). [a Contributors License Agreement?](https://cla-assistant.io/ory/dockertest) - I would like updates about new versions of Ory Dockertest. 
- [How are new releases announced?](https://ory.us10.list-manage.com/subscribe?u=ffb1a878e4ec6c0ed312a3480&id=f605a41b53) + [How are new releases announced?](https://www.ory.sh/l/sign-up-newsletter) ## How can I contribute? @@ -125,32 +125,16 @@ the projects that you are interested in. Also, [follow us on Twitter](https://twitter.com/orycorp). -## Contribute examples +## Contribute examples or community projects -One of the most impactful ways to contribute is by adding examples. You can find -an overview of examples using Ory services on the -[documentation examples page](https://www.ory.sh/docs/examples). Source code for -examples can be found in most cases in the -[ory/examples](https://github.com/ory/examples) repository. +One of the most impactful ways to contribute is by adding code examples or other +Ory-related code. You can find an overview of community code in the +[awesome-ory](https://github.com/ory/awesome-ory) repository. _If you would like to contribute a new example, we would love to hear from you!_ -Please [open an issue](https://github.com/ory/examples/issues/new/choose) to -describe your example before you start working on it. We would love to provide -guidance to make for a pleasant contribution experience. Go through this -checklist to contribute an example: - -1. Create a GitHub issue proposing a new example and make sure it's different - from an existing one. -1. Fork the repo and create a feature branch off of `master` so that changes do - not get mixed up. -1. Add a descriptive prefix to commits. This ensures a uniform commit history - and helps structure the changelog. Please refer to this - [Convential Commits configuration](https://github.com/ory/dockertest/blob/master/.github/workflows/conventional_commits.yml) - for the list of accepted prefixes. You can read more about the Conventional Commit specification [at their site](https://www.conventionalcommits.org/en/v1.0.0/). -1. Create a `README.md` that explains how to use the example. 
(Use - [the README template](https://github.com/ory/examples/blob/master/_common/README.md)). -1. Open a pull request and maintainers will review and merge your example. +Please [open a pull request at awesome-ory](https://github.com/ory/awesome-ory/) +to add your example or Ory-related project to the awesome-ory README. ## Contribute code @@ -175,7 +159,9 @@ request, go through this checklist: 1. Add a descriptive prefix to commits. This ensures a uniform commit history and helps structure the changelog. Please refer to this [Convential Commits configuration](https://github.com/ory/dockertest/blob/master/.github/workflows/conventional_commits.yml) - for the list of accepted prefixes. You can read more about the Conventional Commit specification [at their site](https://www.conventionalcommits.org/en/v1.0.0/). + for the list of accepted prefixes. You can read more about the Conventional + Commit specification + [at their site](https://www.conventionalcommits.org/en/v1.0.0/). If a pull request is not ready to be reviewed yet [it should be marked as a "Draft"](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request). diff --git a/vendor/github.com/ory/dockertest/v3/SECURITY.md b/vendor/github.com/ory/dockertest/v3/SECURITY.md index 7a05c1cf..61045148 100644 --- a/vendor/github.com/ory/dockertest/v3/SECURITY.md +++ b/vendor/github.com/ory/dockertest/v3/SECURITY.md @@ -1,30 +1,56 @@ - - +# Ory Security Policy -- [Security Policy](#security-policy) - - [Supported Versions](#supported-versions) - - [Reporting a Vulnerability](#reporting-a-vulnerability) +This policy outlines Ory's security commitments and practices for users across +different licensing and deployment models. - +To learn more about Ory's security service level agreements (SLAs) and +processes, please [contact us](https://www.ory.sh/contact/). 
-# Security Policy +## Ory Network Users -## Supported Versions +- **Security SLA:** Ory addresses vulnerabilities in the Ory Network according + to the following guidelines: + - Critical: Typically addressed within 14 days. + - High: Typically addressed within 30 days. + - Medium: Typically addressed within 90 days. + - Low: Typically addressed within 180 days. + - Informational: Addressed as necessary. + These timelines are targets and may vary based on specific circumstances. +- **Release Schedule:** Updates are deployed to the Ory Network as + vulnerabilities are resolved. +- **Version Support:** The Ory Network always runs the latest version, ensuring + up-to-date security fixes. -We release patches for security vulnerabilities. Which versions are eligible for -receiving such patches depends on the CVSS v3.0 Rating: +## Ory Enterprise License Customers -| CVSS v3.0 | Supported Versions | -| --------- | ----------------------------------------- | -| 9.0-10.0 | Releases within the previous three months | -| 4.0-8.9 | Most recent release | +- **Security SLA:** Ory addresses vulnerabilities based on their severity: + - Critical: Typically addressed within 14 days. + - High: Typically addressed within 30 days. + - Medium: Typically addressed within 90 days. + - Low: Typically addressed within 180 days. + - Informational: Addressed as necessary. + These timelines are targets and may vary based on specific circumstances. +- **Release Schedule:** Updates are made available as vulnerabilities are + resolved. Ory works closely with enterprise customers to ensure timely updates + that align with their operational needs. +- **Version Support:** Ory may provide security support for multiple versions, + depending on the terms of the enterprise agreement. + +## Apache 2.0 License Users + +- **Security SLA:** Ory does not provide a formal SLA for security issues under + the Apache 2.0 License. 
+- **Release Schedule:** Releases prioritize new functionality and include fixes + for known security vulnerabilities at the time of release. While major + releases typically occur one to two times per year, Ory does not guarantee a + fixed release schedule. +- **Version Support:** Security patches are only provided for the latest release + version. ## Reporting a Vulnerability -Please report (suspected) security vulnerabilities to -**[security@ory.sh](mailto:security@ory.sh)**. You will receive a response from -us within 48 hours. If the issue is confirmed, we will release a patch as soon -as possible depending on complexity but historically within a few days. +For details on how to report security vulnerabilities, visit our +[security policy documentation](https://www.ory.sh/docs/ecosystem/security). diff --git a/vendor/github.com/ory/dockertest/v3/docker/auth.go b/vendor/github.com/ory/dockertest/v3/docker/auth.go index 96a171b6..149d1a23 100644 --- a/vendor/github.com/ory/dockertest/v3/docker/auth.go +++ b/vendor/github.com/ory/dockertest/v3/docker/auth.go @@ -12,9 +12,10 @@ import ( "encoding/base64" "encoding/json" "errors" - "fmt" "io" + "io/ioutil" "os" + "os/exec" "path" "strings" ) @@ -29,7 +30,22 @@ type AuthConfiguration struct { Password string `json:"password,omitempty"` Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken can be supplied with the identitytoken response of the AuthCheck call + // see https://pkg.go.dev/github.com/docker/docker/api/types?tab=doc#AuthConfig + // It can be used in place of password not in conjunction with it IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken can be supplied with the registrytoken + RegistryToken string `json:"registrytoken,omitempty"` +} + +func (c AuthConfiguration) isEmpty() bool { + return c == AuthConfiguration{} +} + +func (c AuthConfiguration) headerKey() string { + return "X-Registry-Auth" } // AuthConfigurations 
represents authentication options to use for the @@ -38,16 +54,46 @@ type AuthConfigurations struct { Configs map[string]AuthConfiguration `json:"configs"` } +func (c AuthConfigurations) isEmpty() bool { + return len(c.Configs) == 0 +} + +func (AuthConfigurations) headerKey() string { + return "X-Registry-Config" +} + +// merge updates the configuration. If a key is defined in both maps, the one +// in c.Configs takes precedence. +func (c *AuthConfigurations) merge(other AuthConfigurations) { + for k, v := range other.Configs { + if c.Configs == nil { + c.Configs = make(map[string]AuthConfiguration) + } + if _, ok := c.Configs[k]; !ok { + c.Configs[k] = v + } + } +} + // AuthConfigurations119 is used to serialize a set of AuthConfigurations // for Docker API >= 1.19. type AuthConfigurations119 map[string]AuthConfiguration +func (c AuthConfigurations119) isEmpty() bool { + return len(c) == 0 +} + +func (c AuthConfigurations119) headerKey() string { + return "X-Registry-Config" +} + // dockerConfig represents a registry authentation configuration from the // .dockercfg file. 
type dockerConfig struct { Auth string `json:"auth"` Email string `json:"email"` IdentityToken string `json:"identitytoken"` + RegistryToken string `json:"registrytoken"` } // NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON @@ -63,32 +109,59 @@ func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) { func cfgPaths(dockerConfigEnv string, homeEnv string) []string { var paths []string if dockerConfigEnv != "" { + paths = append(paths, path.Join(dockerConfigEnv, "plaintext-passwords.json")) paths = append(paths, path.Join(dockerConfigEnv, "config.json")) } if homeEnv != "" { + paths = append(paths, path.Join(homeEnv, ".docker", "plaintext-passwords.json")) paths = append(paths, path.Join(homeEnv, ".docker", "config.json")) paths = append(paths, path.Join(homeEnv, ".dockercfg")) } return paths } -// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from -// system config files. The following files are checked in the order listed: -// - $DOCKER_CONFIG/config.json if DOCKER_CONFIG set in the environment, +// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from system +// config files. 
The following files are checked in the order listed: +// +// If the environment variable DOCKER_CONFIG is set to a non-empty string: +// +// - $DOCKER_CONFIG/plaintext-passwords.json +// - $DOCKER_CONFIG/config.json +// +// Otherwise, it looks for files in the $HOME directory and the legacy +// location: +// +// - $HOME/.docker/plaintext-passwords.json // - $HOME/.docker/config.json // - $HOME/.dockercfg func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { - err := fmt.Errorf("No docker configuration found") + var err error var auths *AuthConfigurations + var result *AuthConfigurations pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME")) + if len(pathsToTry) < 1 { + return nil, errors.New("no docker configuration found") + } + for _, path := range pathsToTry { auths, err = NewAuthConfigurationsFromFile(path) - if err == nil { - return auths, nil + if err != nil { + continue + } + + if result == nil { + result = auths + } else { + result.merge(*auths) } } - return auths, err + + if result != nil { + return result, nil + + } + return nil, err } // NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the @@ -138,8 +211,16 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { c.Configs[reg] = AuthConfiguration{ IdentityToken: conf.IdentityToken, } + case conf.RegistryToken != "": + c.Configs[reg] = AuthConfiguration{ + RegistryToken: conf.RegistryToken, + } case conf.Auth != "": + // support both padded and unpadded encoding data, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + data, err = base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(conf.Auth) + } if err != nil { return nil, err } @@ -191,3 +272,102 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { } return authStatus, nil } + +// helperCredentials represents credentials commit from an helper +type helperCredentials struct { + Username string 
`json:"Username,omitempty"` + Secret string `json:"Secret,omitempty"` +} + +// NewAuthConfigurationsFromCredsHelpers returns AuthConfigurations from +// installed credentials helpers +func NewAuthConfigurationsFromCredsHelpers(registry string) (*AuthConfiguration, error) { + // Load docker configuration file in order to find a possible helper provider + pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME")) + if len(pathsToTry) < 1 { + return nil, errors.New("no docker configuration found") + } + + provider, err := getHelperProviderFromDockerCfg(pathsToTry, registry) + if err != nil { + return nil, err + } + + c, err := getCredentialsFromHelper(provider, registry) + if err != nil { + return nil, err + } + + creds := new(AuthConfiguration) + creds.Username = c.Username + creds.Password = c.Secret + return creds, nil +} + +func getHelperProviderFromDockerCfg(pathsToTry []string, registry string) (string, error) { + for _, path := range pathsToTry { + content, err := ioutil.ReadFile(path) + if err != nil { + // if we can't read the file keep going + continue + } + + provider, err := parseCredsDockerConfig(content, registry) + if err != nil { + continue + } + if provider != "" { + return provider, nil + } + } + return "", errors.New("no docker credentials provider found") +} + +func parseCredsDockerConfig(config []byte, registry string) (string, error) { + creds := struct { + CredsStore string `json:"credsStore,omitempty"` + CredHelpers map[string]string `json:"credHelpers,omitempty"` + }{} + err := json.Unmarshal(config, &creds) + if err != nil { + return "", err + } + + provider, ok := creds.CredHelpers[registry] + if ok { + return provider, nil + } + return creds.CredsStore, nil +} + +// Run and parse the found credential helper +func getCredentialsFromHelper(provider string, registry string) (*helperCredentials, error) { + helpercreds, err := runDockerCredentialsHelper(provider, registry) + if err != nil { + return nil, err + } + + c := 
new(helperCredentials) + err = json.Unmarshal(helpercreds, c) + if err != nil { + return nil, err + } + + return c, nil +} + +func runDockerCredentialsHelper(provider string, registry string) ([]byte, error) { + cmd := exec.Command("docker-credential-"+provider, "get") + + var stdout bytes.Buffer + + cmd.Stdin = bytes.NewBuffer([]byte(registry)) + cmd.Stdout = &stdout + + err := cmd.Run() + if err != nil { + return nil, err + } + + return stdout.Bytes(), nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/client.go b/vendor/github.com/ory/dockertest/v3/docker/client.go index 4f04dcef..f84b1850 100644 --- a/vendor/github.com/ory/dockertest/v3/docker/client.go +++ b/vendor/github.com/ory/dockertest/v3/docker/client.go @@ -58,9 +58,12 @@ var ( ErrInactivityTimeout = errors.New("inactivity time exceeded timeout") apiVersion112, _ = NewAPIVersion("1.12") + apiVersion118, _ = NewAPIVersion("1.18") apiVersion119, _ = NewAPIVersion("1.19") + apiVersion121, _ = NewAPIVersion("1.21") apiVersion124, _ = NewAPIVersion("1.24") apiVersion125, _ = NewAPIVersion("1.25") + apiVersion135, _ = NewAPIVersion("1.35") ) // APIVersion is an internal representation of a version of the Remote API. @@ -267,16 +270,19 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri } // NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH, +// and DOCKER_API_VERSION. // // See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. // See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. 
+// See https://github.com/moby/moby/blob/28d7dba41d0c0d9c7f0dafcc79d3c59f2b3f5dc3/client/options.go#L51 func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") + apiVersionString := os.Getenv("DOCKER_API_VERSION") + client, err := NewVersionedClientFromEnv(apiVersionString) if err != nil { return nil, err } - client.SkipServerVersionCheck = true + client.SkipServerVersionCheck = apiVersionString == "" return client, nil } @@ -539,7 +545,29 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error return err } } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) + return c.streamURL(method, c.getURL(path), streamOptions) +} + +func (c *Client) streamURL(method, url string, streamOptions streamOptions) error { + if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil { + streamOptions.in = bytes.NewReader(nil) + } + if !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return err + } + } + + // make a sub-context so that our active cancellation does not affect parent + ctx := streamOptions.context + if ctx == nil { + ctx = context.Background() + } + subCtx, cancelRequest := context.WithCancel(ctx) + defer cancelRequest() + + req, err := http.NewRequestWithContext(ctx, method, url, streamOptions.in) if err != nil { return err } @@ -560,14 +588,6 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error streamOptions.stderr = io.Discard } - // make a sub-context so that our active cancellation does not affect parent - ctx := streamOptions.context - if ctx == nil { - ctx = context.Background() - } - subCtx, cancelRequest := context.WithCancel(ctx) - defer cancelRequest() - if protocol == unixProtocol || protocol == namedPipeProtocol { var dial net.Conn dial, err = c.Dialer.Dial(protocol, address) @@ -650,11 +670,7 @@ func handleStreamResponse(resp *http.Response, streamOptions 
*streamOptions) err _, err = io.Copy(streamOptions.stdout, resp.Body) return err } - if st, ok := streamOptions.stdout.(interface { - io.Writer - FD() uintptr - IsTerminal() bool - }); ok { + if st, ok := streamOptions.stdout.(stream); ok { err = jsonmessage.DisplayJSONMessagesToStream(resp.Body, st, nil) } else { err = jsonmessage.DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil) @@ -662,6 +678,12 @@ func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) err return err } +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + type proxyReader struct { io.ReadCloser calls uint64 @@ -771,6 +793,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close errs := make(chan error, 1) quit := make(chan struct{}) go func() { + //lint:ignore SA1019 the alternative doesn't quite work, so keep using the deprecated thing. clientconn := httputil.NewClientConn(dial, nil) defer clientconn.Close() clientconn.Do(req) @@ -867,6 +890,29 @@ func (c *Client) getURL(path string) string { return fmt.Sprintf("%s%s", urlStr, path) } +func (c *Client) getPath(basepath string, opts interface{}) (string, error) { + queryStr, requiredAPIVersion := queryStringVersion(opts) + return c.pathVersionCheck(basepath, queryStr, requiredAPIVersion) +} + +func (c *Client) pathVersionCheck(basepath, queryStr string, requiredAPIVersion APIVersion) (string, error) { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { + urlStr = "" + } + if c.requestedAPIVersion != nil { + if c.requestedAPIVersion.GreaterThanOrEqualTo(requiredAPIVersion) { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, c.requestedAPIVersion, basepath, queryStr), nil + } + return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient", + basepath, requiredAPIVersion, c.requestedAPIVersion) + } + if requiredAPIVersion != nil { + 
return fmt.Sprintf("%s/v%s%s?%s", urlStr, requiredAPIVersion, basepath, queryStr), nil + } + return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil +} + // getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX // domain socket to the given path. func (c *Client) getFakeNativeURL(path string) string { @@ -883,17 +929,18 @@ func (c *Client) getFakeNativeURL(path string) string { return fmt.Sprintf("%s%s", urlStr, path) } -func queryString(opts interface{}) string { +func queryStringVersion(opts interface{}) (string, APIVersion) { if opts == nil { - return "" + return "", nil } value := reflect.ValueOf(opts) if value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { - return "" + return "", nil } + var apiVersion APIVersion items := url.Values(map[string][]string{}) for i := 0; i < value.NumField(); i++ { field := value.Type().Field(i) @@ -906,53 +953,80 @@ func queryString(opts interface{}) string { } else if key == "-" { continue } - addQueryStringValue(items, key, value.Field(i)) + if addQueryStringValue(items, key, value.Field(i)) { + verstr := field.Tag.Get("ver") + if verstr != "" { + ver, _ := NewAPIVersion(verstr) + if apiVersion == nil { + apiVersion = ver + } else if ver.GreaterThan(apiVersion) { + apiVersion = ver + } + } + } } - return items.Encode() + return items.Encode(), apiVersion } -func addQueryStringValue(items url.Values, key string, v reflect.Value) { +func queryString(opts interface{}) string { + s, _ := queryStringVersion(opts) + return s +} + +func addQueryStringValue(items url.Values, key string, v reflect.Value) bool { switch v.Kind() { case reflect.Bool: if v.Bool() { items.Add(key, "1") + return true } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if v.Int() > 0 { items.Add(key, strconv.FormatInt(v.Int(), 10)) + return true } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: if v.Uint() > 0 { items.Add(key, 
strconv.FormatUint(v.Uint(), 10)) + return true } case reflect.Float32, reflect.Float64: if v.Float() > 0 { items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + return true } case reflect.String: if v.String() != "" { items.Add(key, v.String()) + return true } case reflect.Ptr: if !v.IsNil() { if b, err := json.Marshal(v.Interface()); err == nil { items.Add(key, string(b)) + return true } } case reflect.Map: if len(v.MapKeys()) > 0 { if b, err := json.Marshal(v.Interface()); err == nil { items.Add(key, string(b)) + return true } } case reflect.Array, reflect.Slice: vLen := v.Len() + var valuesAdded int if vLen > 0 { for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) + if addQueryStringValue(items, key, v.Index(i)) { + valuesAdded++ + } } } + return valuesAdded > 0 } + return false } // Error represents failures in the API. It represents a failure from the API. diff --git a/vendor/github.com/ory/dockertest/v3/docker/image.go b/vendor/github.com/ory/dockertest/v3/docker/image.go index f980dc57..ee7e5f53 100644 --- a/vendor/github.com/ory/dockertest/v3/docker/image.go +++ b/vendor/github.com/ory/dockertest/v3/docker/image.go @@ -8,7 +8,6 @@ package docker import ( - "bytes" "context" "encoding/base64" "encoding/json" @@ -133,6 +132,7 @@ type ImageHistory struct { Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Tags,omitempty"` CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"` Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` } // ImageHistory returns the history of the image by its name or ID. 
@@ -323,12 +323,15 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error opts.Repository = parts[0] opts.Tag = parts[1] } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) + return c.createImage(&opts, headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { - path := "/images/create?" + qs - return c.stream("POST", path, streamOptions{ +func (c *Client) createImage(opts interface{}, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { + url, err := c.getPath("/images/create", opts) + if err != nil { + return err + } + return c.streamURL(http.MethodPost, url, streamOptions{ setRawTerminal: true, headers: headers, in: in, @@ -399,7 +402,28 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { if opts.Names == nil || len(opts.Names) == 0 { return ErrMustSpecifyNames } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ + // API < 1.25 allows multiple name values + // 1.25 says name must be a comma separated list + var err error + var exporturl string + if c.requestedAPIVersion.GreaterThanOrEqualTo(apiVersion125) { + str := opts.Names[0] + for _, val := range opts.Names[1:] { + str += "," + val + } + exporturl, err = c.getPath("/images/get", ExportImagesOptions{ + Names: []string{str}, + OutputStream: opts.OutputStream, + InactivityTimeout: opts.InactivityTimeout, + Context: opts.Context, + }) + } else { + exporturl, err = c.getPath("/images/get", &opts) + } + if err != nil { + return err + } + return c.streamURL(http.MethodGet, exporturl, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: 
opts.InactivityTimeout, @@ -440,7 +464,7 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { opts.InputStream = f opts.Source = "-" } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) + return c.createImage(&opts, nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } // BuildImageOptions present the set of informations available for building an @@ -450,19 +474,20 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { // https://goo.gl/4nYHwV. type BuildImageOptions struct { Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` + Dockerfile string `qs:"dockerfile" ver:"1.25"` NoCache bool `qs:"nocache"` - CacheFrom []string `qs:"-"` + CacheFrom []string `qs:"-" ver:"1.25"` SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` + Pull bool `qs:"pull" ver:"1.16"` RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` + ForceRmTmpContainer bool `qs:"forcerm" ver:"1.12"` RawJSONStream bool `qs:"-"` Memory int64 `qs:"memory"` Memswap int64 `qs:"memswap"` + ShmSize int64 `qs:"shmsize"` CPUShares int64 `qs:"cpushares"` - CPUQuota int64 `qs:"cpuquota"` - CPUPeriod int64 `qs:"cpuperiod"` + CPUQuota int64 `qs:"cpuquota" ver:"1.21"` + CPUPeriod int64 `qs:"cpuperiod" ver:"1.21"` CPUSetCPUs string `qs:"cpusetcpus"` Labels map[string]string `qs:"labels"` InputStream io.Reader `qs:"-"` @@ -472,14 +497,17 @@ type BuildImageOptions struct { Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header ContextDir string `qs:"-"` - Ulimits []ULimit `qs:"-"` - BuildArgs []BuildArg `qs:"-"` - NetworkMode string `qs:"networkmode"` + Ulimits []ULimit `qs:"-" ver:"1.18"` + BuildArgs []BuildArg `qs:"-" ver:"1.21"` + NetworkMode string `qs:"networkmode" ver:"1.25"` InactivityTimeout time.Duration `qs:"-"` 
CgroupParent string `qs:"cgroupparent"` SecurityOpt []string `qs:"securityopt"` Target string `gs:"target"` - Platform string `qs:"platform"` + Version string `qs:"version"` + Platform string `qs:"platform" ver:"1.32"` + Outputs string `qs:"outputs" ver:"1.40"` + ExtraHosts string `qs:"extrahosts" ver:"1.28"` Context context.Context } @@ -523,13 +551,16 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { return err } } - qs := queryString(&opts) + qs, ver := queryStringVersion(&opts) - if c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion125) && len(opts.CacheFrom) > 0 { + if len(opts.CacheFrom) > 0 { if b, err := json.Marshal(opts.CacheFrom); err == nil { item := url.Values(map[string][]string{}) item.Add("cachefrom", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion125.GreaterThan(ver) { + ver = apiVersion125 + } } } @@ -538,6 +569,9 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { item := url.Values(map[string][]string{}) item.Add("ulimits", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion118.GreaterThan(ver) { + ver = apiVersion118 + } } } @@ -550,10 +584,18 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { item := url.Values(map[string][]string{}) item.Add("buildargs", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion121.GreaterThan(ver) { + ver = apiVersion121 + } } } - return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{ + buildURL, err := c.pathVersionCheck("/build", qs, ver) + if err != nil { + return err + } + + return c.streamURL(http.MethodPost, buildURL, streamOptions{ setRawTerminal: true, rawJSONStream: opts.RawJSONStream, headers: headers, @@ -565,7 +607,7 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { }) } -func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { +func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) registryAuth 
{ if c.serverAPIVersion == nil { c.checkAPIVersion() } @@ -617,24 +659,18 @@ func isURL(u string) bool { return p.Scheme == "http" || p.Scheme == "https" } -func headersWithAuth(auths ...interface{}) (map[string]string, error) { +func headersWithAuth(auths ...registryAuth) (map[string]string, error) { var headers = make(map[string]string) for _, auth := range auths { - switch auth.(type) { - case AuthConfiguration: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - case AuthConfigurations, AuthConfigurations119: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + if auth.isEmpty() { + continue } + data, err := json.Marshal(auth) + if err != nil { + return nil, err + } + headers[auth.headerKey()] = base64.URLEncoding.EncodeToString(data) } return headers, nil diff --git a/vendor/github.com/ory/dockertest/v3/docker/registry_auth.go b/vendor/github.com/ory/dockertest/v3/docker/registry_auth.go new file mode 100644 index 00000000..bcd1986f --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/registry_auth.go @@ -0,0 +1,13 @@ +// Copyright © 2023 Ory Corp +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +type registryAuth interface { + isEmpty() bool + headerKey() string +} diff --git a/vendor/github.com/ory/dockertest/v3/dockertest.go b/vendor/github.com/ory/dockertest/v3/dockertest.go index 10bb4715..52bb08fb 100644 --- a/vendor/github.com/ory/dockertest/v3/dockertest.go +++ b/vendor/github.com/ory/dockertest/v3/dockertest.go @@ -331,6 +331,9 @@ type BuildOptions struct { ContextDir string BuildArgs []dc.BuildArg Platform string + // Version specifies the builder to use. "1" for classic, "2" for BuildKit + Version string + Auth dc.AuthConfigurations } // BuildAndRunWithBuildOptions builds and starts a docker container. @@ -343,6 +346,8 @@ func (d *Pool) BuildAndRunWithBuildOptions(buildOpts *BuildOptions, runOpts *Run ContextDir: buildOpts.ContextDir, BuildArgs: buildOpts.BuildArgs, Platform: buildOpts.Platform, + Version: buildOpts.Version, + AuthConfigs: buildOpts.Auth, }) if err != nil { @@ -421,11 +426,23 @@ func (d *Pool) RunWithOptions(opts *RunOptions, hcOpts ...func(*dc.HostConfig)) _, err := d.Client.InspectImage(fmt.Sprintf("%s:%s", repository, tag)) if err != nil { + var ( + auth = opts.Auth + parts = strings.SplitN(repository, "/", 3) + empty = opts.Auth == dc.AuthConfiguration{} + ) + if empty && len(parts) == 3 { + res, err := dc.NewAuthConfigurationsFromCredsHelpers(parts[0]) + if err == nil { + auth = *res + } + } + if err := d.Client.PullImage(dc.PullImageOptions{ Repository: repository, Tag: tag, Platform: opts.Platform, - }, opts.Auth); err != nil { + }, auth); err != nil { return nil, err } } @@ -473,7 +490,7 @@ func (d *Pool) RunWithOptions(opts *RunOptions, hcOpts ...func(*dc.HostConfig)) return nil, err } - c, err = d.Client.InspectContainer(c.ID) + c, err = d.inspectContainerWithRetries(c.ID) if err != nil { return nil, err } @@ -491,6 +508,37 @@ func (d *Pool) RunWithOptions(opts *RunOptions, hcOpts ...func(*dc.HostConfig)) }, nil } +// inspectContainerWithRetries will repeat the inspect call until the container has 
port bindings assigned. +func (d *Pool) inspectContainerWithRetries(id string) (*dc.Container, error) { + const maxRetries = 10 + var ( + retryNum int + c *dc.Container + err error + ) + for retryNum <= maxRetries { + if retryNum > 0 { + time.Sleep(100 * time.Millisecond) + } + c, err = d.Client.InspectContainer(id) + if err != nil { + return nil, err + } + if hasEmptyPortBindings := func() bool { + for _, bindings := range c.NetworkSettings.Ports { + if len(bindings) == 0 { + return true + } + } + return false + }(); !hasEmptyPortBindings { + return c, nil + } + retryNum++ + } + return c, err +} + // Run starts a docker container. // // pool.Run("mysql", "5.3", []string{"FOO=BAR", "BAR=BAZ"}) diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore index 6f868895..93affec7 100644 --- a/vendor/github.com/redis/go-redis/v9/.gitignore +++ b/vendor/github.com/redis/go-redis/v9/.gitignore @@ -3,4 +3,17 @@ testdata/* .idea/ .DS_Store *.tar.gz -*.dic \ No newline at end of file +*.dic +redis8tests.sh +coverage.txt +**/coverage.txt +.vscode +tmp/* +*.test +extra/redisotel-native/metrics-collector-app/ +# maintenanceNotifications upgrade documentation (temporary) +maintenanceNotifications/docs/ + +# Docker-generated files (TLS certificates, cluster data, etc.) +dockers/*/tls/ +dockers/osscluster-tls/ diff --git a/vendor/github.com/redis/go-redis/v9/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml index de514554..dd13c2c2 100644 --- a/vendor/github.com/redis/go-redis/v9/.golangci.yml +++ b/vendor/github.com/redis/go-redis/v9/.golangci.yml @@ -1,4 +1,36 @@ +version: "2" run: - concurrency: 8 - deadline: 5m + timeout: 5m tests: false +linters: + settings: + staticcheck: + checks: + - all + # Incorrect or missing package comment. + # https://staticcheck.dev/docs/checks/#ST1000 + - -ST1000 + # Omit embedded fields from selector expression. 
+ # https://staticcheck.dev/docs/checks/#QF1008 + - -QF1008 + - -ST1003 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md deleted file mode 100644 index 297438a9..00000000 --- a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md +++ /dev/null @@ -1,124 +0,0 @@ -## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) - - -### Features - -* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) -* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) -* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) - - - -## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) - - -### Bug Fixes - -* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) - - -### Features - -* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) -* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) -* read the structure to increase the judgment of the omitempty 
op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) - - - -## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) - -### New Features - -- feat(scan): scan time.Time sets the default decoding (#2413) -- Add support for CLUSTER LINKS command (#2504) -- Add support for acl dryrun command (#2502) -- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) -- Add support for LCS Command (#2480) -- Add support for BZMPOP (#2456) -- Adding support for ZMPOP command (#2408) -- Add support for LMPOP (#2440) -- feat: remove pool unused fields (#2438) -- Expiretime and PExpireTime (#2426) -- Implement `FUNCTION` group of commands (#2475) -- feat(zadd): add ZAddLT and ZAddGT (#2429) -- Add: Support for COMMAND LIST command (#2491) -- Add support for BLMPOP (#2442) -- feat: check pipeline.Do to prevent confusion with Exec (#2517) -- Function stats, function kill, fcall and fcall_ro (#2486) -- feat: Add support for CLUSTER SHARDS command (#2507) -- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) - -### Fixed - -- fix: eval api cmd.SetFirstKeyPos (#2501) -- fix: limit the number of connections created (#2441) -- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) -- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) -- fix: group lag can be null (#2448) - -### Maintenance - -- Updating to the latest version of redis (#2508) -- Allowing for running tests on a port other than the fixed 6380 (#2466) -- redis 7.0.8 in tests (#2450) -- docs: Update redisotel example for v9 (#2425) -- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) -- chore: add Chinese translation (#2436) -- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421) -- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 
to 2.7.0 (#2420) -- chore(deps): bump actions/setup-go from 3 to 4 (#2495) -- docs: add instructions for the HSet api (#2503) -- docs: add reading lag field comment (#2451) -- test: update go mod before testing(go mod tidy) (#2423) -- docs: fix comment typo (#2505) -- test: remove testify (#2463) -- refactor: change ListElementCmd to KeyValuesCmd. (#2443) -- fix(appendArg): appendArg case special type (#2489) - -## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01) - -### Features - -* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65)) - -## v9 2023-01-30 - -### Breaking - -- Changed Pipelines to not be thread-safe any more. - -### Added - -- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was - contributed by @monkey92t who has done the majority of work in this release. -- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts - and deadlines. See - [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details. -- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example, - `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`. -- Added metrics instrumentation using `redisotel.IstrumentMetrics`. See - [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html) -- Added `redis.HasErrorPrefix` to help working with errors. - -### Changed - -- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is - completely gone in v9. -- Reworked hook interface and added `DialHook`. -- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See - [example](example/otel) and - [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html). 
-- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making - an allocation. -- Renamed the option `MaxConnAge` to `ConnMaxLifetime`. -- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`. -- Removed connection reaper in favor of `MaxIdleConns`. -- Removed `WithContext` since `context.Context` can be passed directly as an arg. -- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and - it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to - reset commands for some reason. - -### Fixed - -- Improved and fixed pipeline retries. -- As usually, added support for more commands and fixed some bugs. diff --git a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md index 90030b89..8c68c522 100644 --- a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md +++ b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md @@ -32,20 +32,33 @@ Here's how to get started with your code contribution: 1. Create your own fork of go-redis 2. Do the changes in your fork -3. If you need a development environment, run `make test`. Note: this clones and builds the latest release of [redis](https://redis.io). You also need a redis-stack-server docker, in order to run the capabilities tests. This can be started by running: - ```docker run -p 6379:6379 -it redis/redis-stack-server:edge``` -4. While developing, make sure the tests pass by running `make tests` +3. If you need a development environment, run `make docker.start`. + +> Note: this clones and builds the docker containers specified in `docker-compose.yml`, to understand more about +> the infrastructure that will be started you can check the `docker-compose.yml`. You also have the possiblity +> to specify the redis image that will be pulled with the env variable `CLIENT_LIBS_TEST_IMAGE`. 
+> By default the docker image that will be pulled and started is `redislabs/client-libs-test:8.2.1-pre`. +> If you want to test with newer Redis version, using a newer version of `redislabs/client-libs-test` should work out of the box. + +4. While developing, make sure the tests pass by running `make test` (if you have the docker containers running, `make test.ci` may be sufficient). +> Note: `make test` will try to start all containers, run the tests with `make test.ci` and then stop all containers. 5. If you like the change and think the project could use it, send a pull request To see what else is part of the automation, run `invoke -l` + ## Testing -Call `make test` to run all tests, including linters. +### Setting up Docker +To run the tests, you need to have Docker installed and running. If you are using a host OS that does not support +docker host networks out of the box (e.g. Windows, OSX), you need to set up a docker desktop and enable docker host networks. + +### Running tests +Call `make test` to run all tests. Continuous Integration uses these same wrappers to run all of these -tests against multiple versions of python. Feel free to test your +tests against multiple versions of redis. Feel free to test your changes against all the go versions supported, as declared by the [build.yml](./.github/workflows/build.yml) file. @@ -99,3 +112,7 @@ The core team regularly looks at pull requests. We will provide feedback as soon as possible. After receiving our feedback, please respond within two weeks. After that time, we may close your PR if it isn't showing any activity. 
+ +## Support + +Maintainers can provide limited support to contributors on discord: https://discord.gg/W4txy5AeKM diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile index dc2fe780..370f3880 100644 --- a/vendor/github.com/redis/go-redis/v9/Makefile +++ b/vendor/github.com/redis/go-redis/v9/Makefile @@ -1,36 +1,114 @@ GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +REDIS_VERSION ?= 8.6 +RE_CLUSTER ?= false +RCE_DOCKER ?= true +CLIENT_LIBS_TEST_IMAGE ?= redislabs/client-libs-test:custom-21860421418-debian-amd64 -test: testdeps +docker.start: + export RE_CLUSTER=$(RE_CLUSTER) && \ + export RCE_DOCKER=$(RCE_DOCKER) && \ + export REDIS_VERSION=$(REDIS_VERSION) && \ + export CLIENT_LIBS_TEST_IMAGE=$(CLIENT_LIBS_TEST_IMAGE) && \ + docker compose --profile all up -d --quiet-pull + +docker.stop: + docker compose --profile all down + +docker.e2e.start: + @echo "Starting Redis and cae-resp-proxy for E2E tests..." + docker compose --profile e2e up -d --quiet-pull + @echo "Waiting for services to be ready..." + @sleep 3 + @echo "Services ready!" + +docker.e2e.stop: + @echo "Stopping E2E services..." + docker compose --profile e2e down + +test: + $(MAKE) docker.start + @if [ -z "$(REDIS_VERSION)" ]; then \ + echo "REDIS_VERSION not set, running all tests"; \ + $(MAKE) test.ci; \ + else \ + MAJOR_VERSION=$$(echo "$(REDIS_VERSION)" | cut -d. 
-f1); \ + if [ "$$MAJOR_VERSION" -ge 8 ]; then \ + echo "REDIS_VERSION $(REDIS_VERSION) >= 8, running all tests"; \ + $(MAKE) test.ci; \ + else \ + echo "REDIS_VERSION $(REDIS_VERSION) < 8, skipping vector_sets tests"; \ + $(MAKE) test.ci.skip-vectorsets; \ + fi; \ + fi + $(MAKE) docker.stop + +test.ci: set -e; for dir in $(GO_MOD_DIRS); do \ echo "go test in $${dir}"; \ (cd "$${dir}" && \ + export RE_CLUSTER=$(RE_CLUSTER) && \ + export RCE_DOCKER=$(RCE_DOCKER) && \ + export REDIS_VERSION=$(REDIS_VERSION) && \ go mod tidy -compat=1.18 && \ - go test && \ - go test ./... -short -race && \ - go test ./... -run=NONE -bench=. -benchmem && \ - env GOOS=linux GOARCH=386 go test && \ - go vet); \ + go vet && \ + go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race -skip Example); \ done cd internal/customvet && go build . go vet -vettool ./internal/customvet/customvet -testdeps: testdata/redis/src/redis-server +test.ci.skip-vectorsets: + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go test in $${dir} (skipping vector sets)"; \ + (cd "$${dir}" && \ + export RE_CLUSTER=$(RE_CLUSTER) && \ + export RCE_DOCKER=$(RCE_DOCKER) && \ + export REDIS_VERSION=$(REDIS_VERSION) && \ + go mod tidy -compat=1.18 && \ + go vet && \ + go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race \ + -run '^(?!.*(?:VectorSet|vectorset|ExampleClient_vectorset)).*$$' -skip Example); \ + done + cd internal/customvet && go build . + go vet -vettool ./internal/customvet/customvet -bench: testdeps - go test ./... -test.run=NONE -test.bench=. -test.benchmem +bench: + export RE_CLUSTER=$(RE_CLUSTER) && \ + export RCE_DOCKER=$(RCE_DOCKER) && \ + export REDIS_VERSION=$(REDIS_VERSION) && \ + go test ./... -test.run=NONE -test.bench=. -test.benchmem -skip Example -.PHONY: all test testdeps bench fmt +test.e2e: + @echo "Running E2E tests with auto-start proxy..." + $(MAKE) docker.e2e.start + @echo "Running tests..." 
+ @E2E_SCENARIO_TESTS=true go test -v ./maintnotifications/e2e/ -timeout 30m || ($(MAKE) docker.e2e.stop && exit 1) + $(MAKE) docker.e2e.stop + @echo "E2E tests completed!" + +test.e2e.docker: + @echo "Running Docker-compatible E2E tests..." + $(MAKE) docker.e2e.start + @echo "Running unified injector tests..." + @E2E_SCENARIO_TESTS=true go test -v -run "TestUnifiedInjector|TestCreateTestFaultInjectorLogic|TestFaultInjectorClientCreation" ./maintnotifications/e2e/ -timeout 10m || ($(MAKE) docker.e2e.stop && exit 1) + $(MAKE) docker.e2e.stop + @echo "Docker E2E tests completed!" + +test.e2e.logic: + @echo "Running E2E logic tests (no proxy required)..." + @E2E_SCENARIO_TESTS=true \ + REDIS_ENDPOINTS_CONFIG_PATH=/tmp/test_endpoints_verify.json \ + FAULT_INJECTION_API_URL=http://localhost:8080 \ + go test -v -run "TestCreateTestFaultInjectorLogic|TestFaultInjectorClientCreation" ./maintnotifications/e2e/ + @echo "Logic tests completed!" + +.PHONY: all test test.ci test.ci.skip-vectorsets bench fmt test.e2e test.e2e.logic docker.e2e.start docker.e2e.stop build: + export RE_CLUSTER=$(RE_CLUSTER) && \ + export RCE_DOCKER=$(RCE_DOCKER) && \ + export REDIS_VERSION=$(REDIS_VERSION) && \ go build . 
-testdata/redis: - mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-7.2.1.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - cd $< && make all - fmt: gofumpt -w ./ goimports -w -local github.com/redis/go-redis ./ diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md index 98edbe21..160714ab 100644 --- a/vendor/github.com/redis/go-redis/v9/README.md +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -2,16 +2,31 @@ [![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc) -[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) -[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) +[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.io/docs/latest/develop/clients/go/) +[![Go Report Card](https://goreportcard.com/badge/github.com/redis/go-redis/v9)](https://goreportcard.com/report/github.com/redis/go-redis/v9) +[![codecov](https://codecov.io/github/redis/go-redis/graph/badge.svg?token=tsrCZKuSSw)](https://codecov.io/github/redis/go-redis) -> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). -> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can -> use it to monitor applications and set up automatic alerts to receive notifications via email, -> Slack, Telegram, and others. -> -> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which -> demonstrates how you can use Uptrace to monitor go-redis. 
+[![Discord](https://img.shields.io/discord/697882427875393627.svg?style=social&logo=discord)](https://discord.gg/W4txy5AeKM) +[![Twitch](https://img.shields.io/twitch/status/redisinc?style=social)](https://www.twitch.tv/redisinc) +[![YouTube](https://img.shields.io/youtube/channel/views/UCD78lHSwYqMlyetR0_P4Vig?style=social)](https://www.youtube.com/redisinc) +[![Twitter](https://img.shields.io/twitter/follow/redisinc?style=social)](https://twitter.com/redisinc) +[![Stack Exchange questions](https://img.shields.io/stackexchange/stackoverflow/t/go-redis?style=social&logo=stackoverflow&label=Stackoverflow)](https://stackoverflow.com/questions/tagged/go-redis) + +> go-redis is the official Redis client library for the Go programming language. It offers a straightforward interface for interacting with Redis servers. + +## Supported versions + +In `go-redis` we are aiming to support the last three releases of Redis. Currently, this means we do support: +- [Redis 8.0](https://raw.githubusercontent.com/redis/redis/8.0/00-RELEASENOTES) - using Redis CE 8.0 +- [Redis 8.2](https://raw.githubusercontent.com/redis/redis/8.2/00-RELEASENOTES) - using Redis CE 8.2 +- [Redis 8.4](https://raw.githubusercontent.com/redis/redis/8.4/00-RELEASENOTES) - using Redis CE 8.4 + +Although the `go.mod` states it requires at minimum `go 1.21`, our CI is configured to run the tests against all three +versions of Redis and multiple versions of Go ([1.21](https://go.dev/doc/devel/release#go1.21.0), +[1.23](https://go.dev/doc/devel/release#go1.23.0), oldstable, and stable). We observe that some modules related test may not pass with +Redis Stack 7.2 and some commands are changed with Redis CE 8.0. +Although it is not officially supported, `go-redis/v9` should be able to work with any Redis 7.0+. +Please do refer to the documentation and the tests if you experience any issues. ## How do I Redis? 
@@ -27,40 +42,39 @@ [Work at Redis](https://redis.com/company/careers/jobs/) -## Documentation - -- [English](https://redis.uptrace.dev) -- [简体中文](https://redis.uptrace.dev/zh/) ## Resources - [Discussions](https://github.com/redis/go-redis/discussions) -- [Chat](https://discord.gg/rWtp5Aj) +- [Chat](https://discord.gg/W4txy5AeKM) - [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9) - [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples) +## old documentation + +- [English](https://redis.uptrace.dev) +- [简体中文](https://redis.uptrace.dev/zh/) + ## Ecosystem -- [Redis Mock](https://github.com/go-redis/redismock) +- [Entra ID (Azure AD)](https://github.com/redis/go-redis-entraid) - [Distributed Locks](https://github.com/bsm/redislock) - [Redis Cache](https://github.com/go-redis/cache) - [Rate limiting](https://github.com/go-redis/redis_rate) -This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed -key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol. - ## Features -- Redis 3 commands except QUIT, MONITOR, and SYNC. -- Automatic connection pooling with +- Redis commands except QUIT and SYNC. +- Automatic connection pooling. +- [StreamingCredentialsProvider (e.g. entra id, oauth)](#1-streaming-credentials-provider-highest-priority) (experimental) - [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html). - [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html). - [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html). - [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html). - [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html). -- [Redis Ring](https://redis.uptrace.dev/guide/ring.html). - [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html). 
- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/) +- [Customizable read and write buffers size.](#custom-buffer-sizes) ## Installation @@ -96,6 +110,7 @@ func ExampleClient() { Password: "", // no password set DB: 0, // use default DB }) + defer rdb.Close() err := rdb.Set(ctx, "key", "value", 0).Err() if err != nil { @@ -121,17 +136,121 @@ func ExampleClient() { } ``` -The above can be modified to specify the version of the RESP protocol by adding the `protocol` -option to the `Options` struct: +### Authentication + +The Redis client supports multiple ways to provide authentication credentials, with a clear priority order. Here are the available options: + +#### 1. Streaming Credentials Provider (Highest Priority) - Experimental feature + +The streaming credentials provider allows for dynamic credential updates during the connection lifetime. This is particularly useful for managed identity services and token-based authentication. ```go - rdb := redis.NewClient(&redis.Options{ - Addr: "localhost:6379", - Password: "", // no password set - DB: 0, // use default DB - Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 - }) +type StreamingCredentialsProvider interface { + Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) +} +type CredentialsListener interface { + OnNext(credentials Credentials) // Called when credentials are updated + OnError(err error) // Called when an error occurs +} + +type Credentials interface { + BasicAuth() (username string, password string) + RawCredentials() string +} +``` + +Example usage: +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + StreamingCredentialsProvider: &MyCredentialsProvider{}, +}) +``` + +**Note:** The streaming credentials provider can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) to enable Entra ID (formerly Azure AD) authentication. 
This allows for seamless integration with Azure's managed identity services and token-based authentication. + +Example with Entra ID: +```go +import ( + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis-entraid" +) + +// Create an Entra ID credentials provider +provider := entraid.NewDefaultAzureIdentityProvider() + +// Configure Redis client with Entra ID authentication +rdb := redis.NewClient(&redis.Options{ + Addr: "your-redis-server.redis.cache.windows.net:6380", + StreamingCredentialsProvider: provider, + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, +}) +``` + +#### 2. Context-based Credentials Provider + +The context-based provider allows credentials to be determined at the time of each operation, using the context. + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + CredentialsProviderContext: func(ctx context.Context) (string, string, error) { + // Return username, password, and any error + return "user", "pass", nil + }, +}) +``` + +#### 3. Regular Credentials Provider + +A simple function-based provider that returns static credentials. + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + CredentialsProvider: func() (string, string) { + // Return username and password + return "user", "pass" + }, +}) +``` + +#### 4. Username/Password Fields (Lowest Priority) + +The most basic way to provide credentials is through the `Username` and `Password` fields in the options. + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Username: "user", + Password: "pass", +}) +``` + +#### Priority Order + +The client will use credentials in the following priority order: +1. Streaming Credentials Provider (if set) +2. Context-based Credentials Provider (if set) +3. Regular Credentials Provider (if set) +4. Username/Password fields (if set) + +If none of these are set, the client will attempt to connect without authentication. 
+ +### Protocol Version + +The client supports both RESP2 and RESP3 protocols. You can specify the protocol version in the options: + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 +}) ``` ### Connecting via a redis url @@ -143,9 +262,6 @@ to this specification. ```go import ( - "context" - "fmt" - "github.com/redis/go-redis/v9" ) @@ -161,9 +277,119 @@ func ExampleClient() *redis.Client { ``` -## Contributing +### Instrument with OpenTelemetry -Please see [out contributing guidelines](CONTRIBUTING.md) to help us improve this library! +```go +import ( + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis/extra/redisotel/v9" + "errors" +) + +func main() { + ... + rdb := redis.NewClient(&redis.Options{...}) + + if err := errors.Join(redisotel.InstrumentTracing(rdb), redisotel.InstrumentMetrics(rdb)); err != nil { + log.Fatal(err) + } +``` + + +### Buffer Size Configuration + +go-redis uses 32KiB read and write buffers by default for optimal performance. For high-throughput applications or large pipelines, you can customize buffer sizes: + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + ReadBufferSize: 1024 * 1024, // 1MiB read buffer + WriteBufferSize: 1024 * 1024, // 1MiB write buffer +}) +``` + +### Advanced Configuration + +go-redis supports extending the client identification phase to allow projects to send their own custom client identification. + +#### Default Client Identification + +By default, go-redis automatically sends the client library name and version during the connection process. This feature is available in redis-server as of version 7.2. As a result, the command is "fire and forget", meaning it should fail silently, in the case that the redis server does not support this feature. 
+ +#### Disabling Identity Verification + +When connection identity verification is not required or needs to be explicitly disabled, a `DisableIdentity` configuration option exists. +Initially there was a typo and the option was named `DisableIndentity` instead of `DisableIdentity`. The misspelled option is marked as Deprecated and will be removed in V10 of this library. +Although both options will work at the moment, the correct option is `DisableIdentity`. The deprecated option will be removed in V10 of this library, so please use the correct option name to avoid any issues. + +To disable verification, set the `DisableIdentity` option to `true` in the Redis client options: + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", + DB: 0, + DisableIdentity: true, // Disable set-info on connect +}) +``` + +#### Unstable RESP3 Structures for RediSearch Commands +When integrating Redis with application functionalities using RESP3, it's important to note that some response structures aren't final yet. This is especially true for more complex structures like search and query results. We recommend using RESP2 when using the search and query capabilities, but we plan to stabilize the RESP3-based APIs in the coming versions. You can find more guidance in the upcoming release notes. + +To enable unstable RESP3, set the option in your client configuration: + +```go +redis.NewClient(&redis.Options{ + UnstableResp3: true, + }) +``` +**Note:** When UnstableResp3 mode is enabled, it's necessary to use RawResult() and RawVal() to retrieve the raw data.
+ Since raw response is the only option for unstable search commands, Val() and Result() calls wouldn't have any effect on them: + +```go +res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawResult() +val1 := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawVal() +``` + +#### Redis-Search Default Dialect + +In the Redis-Search module, **the default dialect is 2**. If needed, you can explicitly specify a different dialect using the appropriate configuration in your queries. + +**Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by passing the desired dialect in the arguments of the command you want to execute. +For example: +``` + res2, err := rdb.FTSearchWithArgs(ctx, + "idx:bicycle", + "@pickup_zone:[CONTAINS $bike]", + &redis.FTSearchOptions{ + Params: map[string]interface{}{ + "bike": "POINT(-0.1278 51.5074)", + }, + DialectVersion: 3, + }, + ).Result() +``` +You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/). + +#### Custom buffer sizes +Prior to v9.12, the buffer size was the default go value of 4096 bytes. Starting from v9.12, +go-redis uses 32KiB read and write buffers by default for optimal performance. +For high-throughput applications or large pipelines, you can customize buffer sizes: + +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + ReadBufferSize: 1024 * 1024, // 1MiB read buffer + WriteBufferSize: 1024 * 1024, // 1MiB write buffer +}) +``` + +**Important**: If you experience any issues with the default buffer sizes, please try setting them to the go default of 4096 bytes. + +## Contributing +We welcome contributions to the go-redis library! If you have a bug fix, feature request, or improvement, please open an issue or pull request on GitHub.
+We appreciate your help in making go-redis better for everyone. +If you are interested in contributing to the go-redis library, please check out our [contributing guidelines](CONTRIBUTING.md) for more information on how to get started. ## Look and feel @@ -200,38 +426,150 @@ vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello") res, err := rdb.Do(ctx, "set", "key", "value").Result() ``` -## Run the test +## Typed Errors -go-redis will start a redis-server and run the test cases. - -The paths of redis-server bin file and redis config file are defined in `main_test.go`: +go-redis provides typed error checking functions for common Redis errors: ```go -var ( - redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) - redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) -) +// Cluster and replication errors +redis.IsLoadingError(err) // Redis is loading the dataset +redis.IsReadOnlyError(err) // Write to read-only replica +redis.IsClusterDownError(err) // Cluster is down +redis.IsTryAgainError(err) // Command should be retried +redis.IsMasterDownError(err) // Master is down +redis.IsMovedError(err) // Returns (address, true) if key moved +redis.IsAskError(err) // Returns (address, true) if key being migrated + +// Connection and resource errors +redis.IsMaxClientsError(err) // Maximum clients reached +redis.IsAuthError(err) // Authentication failed (NOAUTH, WRONGPASS, unauthenticated) +redis.IsPermissionError(err) // Permission denied (NOPERM) +redis.IsOOMError(err) // Out of memory (OOM) + +// Transaction errors +redis.IsExecAbortError(err) // Transaction aborted (EXECABORT) ``` -For local testing, you can change the variables to refer to your local files, or create a soft link -to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: +### Error Wrapping in Hooks -```shell -ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src -cp 
./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ +When wrapping errors in hooks, use custom error types with `Unwrap()` method (preferred) or `fmt.Errorf` with `%w`. Always call `cmd.SetErr()` to preserve error type information: + +```go +// Custom error type (preferred) +type AppError struct { + Code string + RequestID string + Err error +} + +func (e *AppError) Error() string { + return fmt.Sprintf("[%s] request_id=%s: %v", e.Code, e.RequestID, e.Err) +} + +func (e *AppError) Unwrap() error { + return e.Err +} + +// Hook implementation +func (h MyHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + err := next(ctx, cmd) + if err != nil { + // Wrap with custom error type + wrappedErr := &AppError{ + Code: "REDIS_ERROR", + RequestID: getRequestID(ctx), + Err: err, + } + cmd.SetErr(wrappedErr) + return wrappedErr // Return wrapped error to preserve it + } + return nil + } +} + +// Typed error detection works through wrappers +if redis.IsLoadingError(err) { + // Retry logic +} + +// Extract custom error if needed +var appErr *AppError +if errors.As(err, &appErr) { + log.Printf("Request: %s", appErr.RequestID) +} ``` -Lastly, run: - -```shell -go test +Alternatively, use `fmt.Errorf` with `%w`: +```go +wrappedErr := fmt.Errorf("context: %w", err) +cmd.SetErr(wrappedErr) ``` -Another option is to run your specific tests with an already running redis. 
The example below, tests -against a redis running on port 9999.: +### Pipeline Hook Example +For pipeline operations, use `ProcessPipelineHook`: + +```go +type PipelineLoggingHook struct{} + +func (h PipelineLoggingHook) DialHook(next redis.DialHook) redis.DialHook { + return next +} + +func (h PipelineLoggingHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { + return next +} + +func (h PipelineLoggingHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + start := time.Now() + + // Execute the pipeline + err := next(ctx, cmds) + + duration := time.Since(start) + log.Printf("Pipeline executed %d commands in %v", len(cmds), duration) + + // Process individual command errors + // Note: Individual command errors are already set on each cmd by the pipeline execution + for _, cmd := range cmds { + if cmdErr := cmd.Err(); cmdErr != nil { + // Check for specific error types using typed error functions + if redis.IsAuthError(cmdErr) { + log.Printf("Auth error in pipeline command %s: %v", cmd.Name(), cmdErr) + } else if redis.IsPermissionError(cmdErr) { + log.Printf("Permission error in pipeline command %s: %v", cmd.Name(), cmdErr) + } + + // Optionally wrap individual command errors to add context + // The wrapped error preserves type information through errors.As() + wrappedErr := fmt.Errorf("pipeline cmd %s failed: %w", cmd.Name(), cmdErr) + cmd.SetErr(wrappedErr) + } + } + + // Return the pipeline-level error (connection errors, etc.) 
+ // You can wrap it if needed, or return it as-is + return err + } +} + +// Register the hook +rdb.AddHook(PipelineLoggingHook{}) + +// Use pipeline - errors are still properly typed +pipe := rdb.Pipeline() +pipe.Set(ctx, "key1", "value1", 0) +pipe.Get(ctx, "key2") +_, err := pipe.Exec(ctx) +``` + +## Run the test + +Recommended to use Docker, just need to run: ```shell -REDIS_PORT=9999 go test +make test ``` ## See also @@ -243,6 +581,14 @@ REDIS_PORT=9999 go test ## Contributors +> The go-redis project was originally initiated by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can +> use it to monitor applications and set up automatic alerts to receive notifications via email, +> Slack, Telegram, and others. +> +> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which +> demonstrates how you can use Uptrace to monitor go-redis. + Thanks to all the people who already contributed! diff --git a/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md new file mode 100644 index 00000000..7b705ee6 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md @@ -0,0 +1,859 @@ +# Release Notes + +# 9.18.0 (2026-02-16) + +## 🚀 Highlights + +### Redis 8.6 Support + +Added support for Redis 8.6, including new commands and features for streams idempotent production and HOTKEYS. + +### Smart Client Handoff (Maintenance Notifications) for Cluster + +This release introduces comprehensive support for Redis Cluster maintenance notifications via SMIGRATING/SMIGRATED push notifications. 
The client now automatically handles slot migrations by: +- **Relaxing timeouts during migration** (SMIGRATING) to prevent false failures +- **Triggering lazy cluster state reloads** upon completion (SMIGRATED) +- Enabling seamless operations during Redis Enterprise maintenance windows + +([#3643](https://github.com/redis/go-redis/pull/3643)) by [@ndyakov](https://github.com/ndyakov) + +### OpenTelemetry Native Metrics Support + +Added comprehensive OpenTelemetry metrics support following the [OpenTelemetry Database Client Semantic Conventions](https://opentelemetry.io/docs/specs/semconv/database/database-metrics/). The implementation uses a Bridge Pattern to keep the core library dependency-free while providing optional metrics instrumentation through the new `extra/redisotel-native` package. + +**Metric groups include:** +- Command metrics: Operation duration with retry tracking +- Connection basic: Connection count and creation time +- Resiliency: Errors, handoffs, timeout relaxation +- Connection advanced: Wait time and use time +- Pubsub metrics: Published and received messages +- Stream metrics: Processing duration and maintenance notifications + +([#3637](https://github.com/redis/go-redis/pull/3637)) by [@ofekshenawa](https://github.com/ofekshenawa) + +## ✨ New Features + +- **HOTKEYS Commands**: Added support for Redis HOTKEYS feature for identifying hot keys based on CPU consumption and network utilization ([#3695](https://github.com/redis/go-redis/pull/3695)) by [@ofekshenawa](https://github.com/ofekshenawa) +- **Streams Idempotent Production**: Added support for Redis 8.6+ Streams Idempotent Production with `ProducerID`, `IdempotentID`, `IdempotentAuto` in `XAddArgs` and new `XCFGSET` command ([#3693](https://github.com/redis/go-redis/pull/3693)) by [@ofekshenawa](https://github.com/ofekshenawa) +- **NaN Values for TimeSeries**: Added support for NaN (Not a Number) values in Redis time series commands 
([#3687](https://github.com/redis/go-redis/pull/3687)) by [@ofekshenawa](https://github.com/ofekshenawa) +- **DialerRetries Options**: Added `DialerRetries` and `DialerRetryTimeout` to `ClusterOptions`, `RingOptions`, and `FailoverOptions` ([#3686](https://github.com/redis/go-redis/pull/3686)) by [@naveenchander30](https://github.com/naveenchander30) +- **ConnMaxLifetimeJitter**: Added jitter configuration to distribute connection expiration times and prevent thundering herd ([#3666](https://github.com/redis/go-redis/pull/3666)) by [@cyningsun](https://github.com/cyningsun) +- **Digest Helper Functions**: Added `DigestString` and `DigestBytes` helper functions for client-side xxh3 hashing compatible with Redis DIGEST command ([#3679](https://github.com/redis/go-redis/pull/3679)) by [@ofekshenawa](https://github.com/ofekshenawa) +- **SMIGRATED New Format**: Updated SMIGRATED parser to support new format and remember original host:port ([#3697](https://github.com/redis/go-redis/pull/3697)) by [@ndyakov](https://github.com/ndyakov) +- **Cluster State Reload Interval**: Added cluster state reload interval option for maintenance notifications ([#3663](https://github.com/redis/go-redis/pull/3663)) by [@ndyakov](https://github.com/ndyakov) + +## 🐛 Bug Fixes + +- **PubSub nil pointer dereference**: Fixed nil pointer dereference in PubSub after `WithTimeout()` - `pubSubPool` is now properly cloned ([#3710](https://github.com/redis/go-redis/pull/3710)) by [@Copilot](https://github.com/apps/copilot-swe-agent) +- **MaintNotificationsConfig nil check**: Guard against nil `MaintNotificationsConfig` in `initConn` ([#3707](https://github.com/redis/go-redis/pull/3707)) by [@veeceey](https://github.com/veeceey) +- **wantConnQueue zombie elements**: Fixed zombie `wantConn` elements accumulation in `wantConnQueue` ([#3680](https://github.com/redis/go-redis/pull/3680)) by [@cyningsun](https://github.com/cyningsun) +- **XADD/XTRIM approx flag**: Fixed XADD and XTRIM to use `=` when 
approx is false ([#3684](https://github.com/redis/go-redis/pull/3684)) by [@ndyakov](https://github.com/ndyakov) +- **Sentinel timeout retry**: When connection to a sentinel times out, attempt to connect to other sentinels ([#3654](https://github.com/redis/go-redis/pull/3654)) by [@cxljs](https://github.com/cxljs) + +## ⚡ Performance + +- **Fuzz test optimization**: Eliminated repeated string conversions, used functional approach for cleaner operation selection ([#3692](https://github.com/redis/go-redis/pull/3692)) by [@feiguoL](https://github.com/feiguoL) +- **Pre-allocate capacity**: Pre-allocate slice capacity to prevent multiple capacity expansions ([#3689](https://github.com/redis/go-redis/pull/3689)) by [@feelshu](https://github.com/feelshu) + +## 🧪 Testing + +- **Comprehensive TLS tests**: Added comprehensive TLS tests and example for standalone, cluster, and certificate authentication ([#3681](https://github.com/redis/go-redis/pull/3681)) by [@ndyakov](https://github.com/ndyakov) +- **Redis 8.6**: Updated CI to use Redis 8.6-pre ([#3685](https://github.com/redis/go-redis/pull/3685)) by [@ndyakov](https://github.com/ndyakov) + +## 🧰 Maintenance + +- **Deprecation warnings**: Added deprecation warnings for commands based on Redis documentation ([#3673](https://github.com/redis/go-redis/pull/3673)) by [@ndyakov](https://github.com/ndyakov) +- **Use errors.Join()**: Replaced custom error join function with standard library `errors.Join()` ([#3653](https://github.com/redis/go-redis/pull/3653)) by [@cxljs](https://github.com/cxljs) +- **Use Go 1.21 min/max**: Use Go 1.21's built-in min/max functions ([#3656](https://github.com/redis/go-redis/pull/3656)) by [@cxljs](https://github.com/cxljs) +- **Proper formatting**: Code formatting improvements ([#3670](https://github.com/redis/go-redis/pull/3670)) by [@12ya](https://github.com/12ya) +- **Set commands documentation**: Added comprehensive documentation to all set command methods 
([#3642](https://github.com/redis/go-redis/pull/3642)) by [@iamamirsalehi](https://github.com/iamamirsalehi) +- **MaxActiveConns docs**: Added default value documentation for `MaxActiveConns` ([#3674](https://github.com/redis/go-redis/pull/3674)) by [@codykaup](https://github.com/codykaup) +- **README example update**: Updated README example ([#3657](https://github.com/redis/go-redis/pull/3657)) by [@cxljs](https://github.com/cxljs) +- **Cluster maintnotif example**: Added example application for cluster maintenance notifications ([#3651](https://github.com/redis/go-redis/pull/3651)) by [@ndyakov](https://github.com/ndyakov) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! + +[@12ya](https://github.com/12ya), [@Copilot](https://github.com/apps/copilot-swe-agent), [@codykaup](https://github.com/codykaup), [@cxljs](https://github.com/cxljs), [@cyningsun](https://github.com/cyningsun), [@feelshu](https://github.com/feelshu), [@feiguoL](https://github.com/feiguoL), [@iamamirsalehi](https://github.com/iamamirsalehi), [@naveenchander30](https://github.com/naveenchander30), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@veeceey](https://github.com/veeceey) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.17.0...v9.18.0 + +# 9.18.0-beta.2 (2025-12-09) + +## 🚀 Highlights + +### Go Version Update + +This release updates the minimum required Go version to 1.21. This is part of a gradual migration strategy where the minimum supported Go version will be three versions behind the latest release. With each new Go version release, we will bump the minimum version by one, ensuring compatibility while staying current with the Go ecosystem. 
+ +### Stability Improvements + +This release includes several important stability fixes: +- Fixed a critical panic in the handoff worker manager that could occur when handling nil errors +- Improved test reliability for Smart Client Handoff functionality +- Fixed logging format issues that could cause runtime errors + +## ✨ New Features + +- OpenTelemetry metrics improvements for nil response handling ([#3638](https://github.com/redis/go-redis/pull/3638)) by [@fengve](https://github.com/fengve) + +## 🐛 Bug Fixes + +- Fixed panic on nil error in handoffWorkerManager closeConnFromRequest ([#3633](https://github.com/redis/go-redis/pull/3633)) by [@ccoVeille](https://github.com/ccoVeille) +- Fixed bad sprintf syntax in logging ([#3632](https://github.com/redis/go-redis/pull/3632)) by [@ccoVeille](https://github.com/ccoVeille) + +## 🧰 Maintenance + +- Updated minimum Go version to 1.21 ([#3640](https://github.com/redis/go-redis/pull/3640)) by [@ndyakov](https://github.com/ndyakov) +- Use Go 1.20 idiomatic string<->byte conversion ([#3435](https://github.com/redis/go-redis/pull/3435)) by [@justinhwang](https://github.com/justinhwang) +- Reduce flakiness of Smart Client Handoff test ([#3641](https://github.com/redis/go-redis/pull/3641)) by [@kiryazovi-redis](https://github.com/kiryazovi-redis) +- Revert PR #3634 (Observability metrics phase1) ([#3635](https://github.com/redis/go-redis/pull/3635)) by [@ofekshenawa](https://github.com/ofekshenawa) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! 
+ +[@justinhwang](https://github.com/justinhwang), [@ndyakov](https://github.com/ndyakov), [@kiryazovi-redis](https://github.com/kiryazovi-redis), [@fengve](https://github.com/fengve), [@ccoVeille](https://github.com/ccoVeille), [@ofekshenawa](https://github.com/ofekshenawa) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.18.0-beta.1...v9.18.0-beta.2 + +# 9.18.0-beta.1 (2025-12-01) + +## 🚀 Highlights + +### Request and Response Policy Based Routing in Cluster Mode + +This beta release introduces comprehensive support for Redis COMMAND-based request and response policy routing for cluster clients. This feature enables intelligent command routing and response aggregation based on Redis command metadata. + +**Key Features:** +- **Command Policy Loader**: Automatically parses and caches COMMAND metadata with routing/aggregation hints +- **Enhanced Routing Engine**: Supports all request policies including: + - `default(keyless)` - Commands without keys + - `default(hashslot)` - Commands with hash slot routing + - `all_shards` - Commands that need to run on all shards + - `all_nodes` - Commands that need to run on all nodes + - `multi_shard` - Commands that span multiple shards + - `special` - Commands with custom routing logic +- **Response Aggregator**: Intelligently combines multi-shard replies based on response policies: + - `all_succeeded` - All shards must succeed + - `one_succeeded` - At least one shard must succeed + - `agg_sum` - Aggregate numeric responses + - `special` - Custom aggregation logic (e.g., FT.CURSOR) +- **Raw Command Support**: Policies are enforced on `Client.Do(ctx, args...)` + +This feature is particularly useful for Redis Stack commands like RediSearch that need to operate across multiple shards in a cluster. + +### Connection Pool Improvements + +Fixed a critical defect in the connection pool's turn management mechanism that could lead to connection leaks under certain conditions. 
The fix ensures proper 1:1 correspondence between turns and connections. + +## ✨ New Features + +- Request and Response Policy Based Routing in Cluster Mode ([#3422](https://github.com/redis/go-redis/pull/3422)) by [@ofekshenawa](https://github.com/ofekshenawa) + +## 🐛 Bug Fixes + +- Fixed connection pool turn management to prevent connection leaks ([#3626](https://github.com/redis/go-redis/pull/3626)) by [@cyningsun](https://github.com/cyningsun) + +## 🧰 Maintenance + +- chore(deps): bump rojopolis/spellcheck-github-actions from 0.54.0 to 0.55.0 ([#3627](https://github.com/redis/go-redis/pull/3627)) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! + +[@cyningsun](https://github.com/cyningsun), [@ofekshenawa](https://github.com/ofekshenawa), [@ndyakov](https://github.com/ndyakov) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.17.1...v9.18.0-beta.1 + +# 9.17.1 (2025-11-25) + +## 🐛 Bug Fixes + +- add wait to keyless commands list ([#3615](https://github.com/redis/go-redis/pull/3615)) by [@marcoferrer](https://github.com/marcoferrer) +- fix(time): remove cached time optimization ([#3611](https://github.com/redis/go-redis/pull/3611)) by [@ndyakov](https://github.com/ndyakov) + +## 🧰 Maintenance + +- chore(deps): bump golangci/golangci-lint-action from 9.0.0 to 9.1.0 ([#3609](https://github.com/redis/go-redis/pull/3609)) +- chore(deps): bump actions/checkout from 5 to 6 ([#3610](https://github.com/redis/go-redis/pull/3610)) +- chore(script): fix help call in tag.sh ([#3606](https://github.com/redis/go-redis/pull/3606)) by [@ndyakov](https://github.com/ndyakov) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+ +[@marcoferrer](https://github.com/marcoferrer) and [@ndyakov](https://github.com/ndyakov) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.17.0...v9.17.1 + +# 9.17.0 (2025-11-19) + +## 🚀 Highlights + +### Redis 8.4 Support +Added support for Redis 8.4, including new commands and features ([#3572](https://github.com/redis/go-redis/pull/3572)) + +### Typed Errors +Introduced typed errors for better error handling using `errors.As` instead of string checks. Errors can now be wrapped and set to commands in hooks without breaking library functionality ([#3602](https://github.com/redis/go-redis/pull/3602)) + +### New Commands +- **CAS/CAD Commands**: Added support for Compare-And-Set/Compare-And-Delete operations with conditional matching (`IFEQ`, `IFNE`, `IFDEQ`, `IFDNE`) ([#3583](https://github.com/redis/go-redis/pull/3583), [#3595](https://github.com/redis/go-redis/pull/3595)) +- **MSETEX**: Atomically set multiple key-value pairs with expiration options and conditional modes ([#3580](https://github.com/redis/go-redis/pull/3580)) +- **XReadGroup CLAIM**: Consume both incoming and idle pending entries from streams in a single call ([#3578](https://github.com/redis/go-redis/pull/3578)) +- **ACL Commands**: Added `ACLGenPass`, `ACLUsers`, and `ACLWhoAmI` ([#3576](https://github.com/redis/go-redis/pull/3576)) +- **SLOWLOG Commands**: Added `SLOWLOG LEN` and `SLOWLOG RESET` ([#3585](https://github.com/redis/go-redis/pull/3585)) +- **LATENCY Commands**: Added `LATENCY LATEST` and `LATENCY RESET` ([#3584](https://github.com/redis/go-redis/pull/3584)) + +### Search & Vector Improvements +- **Hybrid Search**: Added **EXPERIMENTAL** support for the new `FT.HYBRID` command ([#3573](https://github.com/redis/go-redis/pull/3573)) +- **Vector Range**: Added `VRANGE` command for vector sets ([#3543](https://github.com/redis/go-redis/pull/3543)) +- **FT.INFO Enhancements**: Added vector-specific attributes in FT.INFO response 
([#3596](https://github.com/redis/go-redis/pull/3596)) + +### Connection Pool Improvements +- **Improved Connection Success Rate**: Implemented FIFO queue-based fairness and context pattern for connection creation to prevent premature cancellation under high concurrency ([#3518](https://github.com/redis/go-redis/pull/3518)) +- **Connection State Machine**: Resolved race conditions and improved pool performance with proper state tracking ([#3559](https://github.com/redis/go-redis/pull/3559)) +- **Pool Performance**: Significant performance improvements with faster semaphores, lockless hook manager, and reduced allocations (47-67% faster Get/Put operations) ([#3565](https://github.com/redis/go-redis/pull/3565)) + +### Metrics & Observability +- **Canceled Metric Attribute**: Added 'canceled' metrics attribute to distinguish context cancellation errors from other errors ([#3566](https://github.com/redis/go-redis/pull/3566)) + +## ✨ New Features + +- Typed errors with wrapping support ([#3602](https://github.com/redis/go-redis/pull/3602)) by [@ndyakov](https://github.com/ndyakov) +- CAS/CAD commands (marked as experimental) ([#3583](https://github.com/redis/go-redis/pull/3583), [#3595](https://github.com/redis/go-redis/pull/3595)) by [@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis) +- MSETEX command support ([#3580](https://github.com/redis/go-redis/pull/3580)) by [@ofekshenawa](https://github.com/ofekshenawa) +- XReadGroup CLAIM argument ([#3578](https://github.com/redis/go-redis/pull/3578)) by [@ofekshenawa](https://github.com/ofekshenawa) +- ACL commands: GenPass, Users, WhoAmI ([#3576](https://github.com/redis/go-redis/pull/3576)) by [@destinyoooo](https://github.com/destinyoooo) +- SLOWLOG commands: LEN, RESET ([#3585](https://github.com/redis/go-redis/pull/3585)) by [@destinyoooo](https://github.com/destinyoooo) +- LATENCY commands: LATEST, RESET ([#3584](https://github.com/redis/go-redis/pull/3584)) by 
[@destinyoooo](https://github.com/destinyoooo) +- Hybrid search command (FT.HYBRID) ([#3573](https://github.com/redis/go-redis/pull/3573)) by [@htemelski-redis](https://github.com/htemelski-redis) +- Vector range command (VRANGE) ([#3543](https://github.com/redis/go-redis/pull/3543)) by [@cxljs](https://github.com/cxljs) +- Vector-specific attributes in FT.INFO ([#3596](https://github.com/redis/go-redis/pull/3596)) by [@ndyakov](https://github.com/ndyakov) +- Improved connection pool success rate with FIFO queue ([#3518](https://github.com/redis/go-redis/pull/3518)) by [@cyningsun](https://github.com/cyningsun) +- Canceled metrics attribute for context errors ([#3566](https://github.com/redis/go-redis/pull/3566)) by [@pvragov](https://github.com/pvragov) + +## 🐛 Bug Fixes + +- Fixed Failover Client MaintNotificationsConfig ([#3600](https://github.com/redis/go-redis/pull/3600)) by [@ajax16384](https://github.com/ajax16384) +- Fixed ACLGenPass function to use the bit parameter ([#3597](https://github.com/redis/go-redis/pull/3597)) by [@destinyoooo](https://github.com/destinyoooo) +- Return error instead of panic from commands ([#3568](https://github.com/redis/go-redis/pull/3568)) by [@dragneelfps](https://github.com/dragneelfps) +- Safety harness in `joinErrors` to prevent panic ([#3577](https://github.com/redis/go-redis/pull/3577)) by [@manisharma](https://github.com/manisharma) + +## ⚡ Performance + +- Connection state machine with race condition fixes ([#3559](https://github.com/redis/go-redis/pull/3559)) by [@ndyakov](https://github.com/ndyakov) +- Pool performance improvements: 47-67% faster Get/Put, 33% less memory, 50% fewer allocations ([#3565](https://github.com/redis/go-redis/pull/3565)) by [@ndyakov](https://github.com/ndyakov) + +## 🧪 Testing & Infrastructure + +- Updated to Redis 8.4.0 image ([#3603](https://github.com/redis/go-redis/pull/3603)) by [@ndyakov](https://github.com/ndyakov) +- Added Redis 8.4-RC1-pre to CI 
([#3572](https://github.com/redis/go-redis/pull/3572)) by [@ndyakov](https://github.com/ndyakov) +- Refactored tests for idiomatic Go ([#3561](https://github.com/redis/go-redis/pull/3561), [#3562](https://github.com/redis/go-redis/pull/3562), [#3563](https://github.com/redis/go-redis/pull/3563)) by [@12ya](https://github.com/12ya) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! + +[@12ya](https://github.com/12ya), [@ajax16384](https://github.com/ajax16384), [@cxljs](https://github.com/cxljs), [@cyningsun](https://github.com/cyningsun), [@destinyoooo](https://github.com/destinyoooo), [@dragneelfps](https://github.com/dragneelfps), [@htemelski-redis](https://github.com/htemelski-redis), [@manisharma](https://github.com/manisharma), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@pvragov](https://github.com/pvragov) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.16.0...v9.17.0 + +# 9.16.0 (2025-10-23) + +## 🚀 Highlights + +### Maintenance Notifications Support + +This release introduces comprehensive support for Redis maintenance notifications, enabling applications to handle server maintenance events gracefully. The new `maintnotifications` package provides: + +- **RESP3 Push Notifications**: Full support for Redis RESP3 protocol push notifications +- **Connection Handoff**: Automatic connection migration during server maintenance with configurable retry policies and circuit breakers +- **Graceful Degradation**: Configurable timeout relaxation during maintenance windows to prevent false failures +- **Event-Driven Architecture**: Background workers with on-demand scaling for efficient handoff processing +- **Production-Ready**: Comprehensive E2E testing framework and monitoring capabilities + +For detailed usage examples and configuration options, see the [maintenance notifications documentation](maintnotifications/README.md). 
+ +## ✨ New Features + +- **Trace Filtering**: Add support for filtering traces for specific commands, including pipeline operations and dial operations ([#3519](https://github.com/redis/go-redis/pull/3519), [#3550](https://github.com/redis/go-redis/pull/3550)) + - New `TraceCmdFilter` option to selectively trace commands + - Reduces overhead by excluding high-frequency or low-value commands from traces + +## 🐛 Bug Fixes + +- **Pipeline Error Handling**: Fix issue where pipeline repeatedly sets the same error ([#3525](https://github.com/redis/go-redis/pull/3525)) +- **Connection Pool**: Ensure re-authentication does not interfere with connection handoff operations ([#3547](https://github.com/redis/go-redis/pull/3547)) + +## 🔧 Improvements + +- **Hash Commands**: Update hash command implementations ([#3523](https://github.com/redis/go-redis/pull/3523)) +- **OpenTelemetry**: Use `metric.WithAttributeSet` to avoid unnecessary attribute copying in redisotel ([#3552](https://github.com/redis/go-redis/pull/3552)) + +## 📚 Documentation + +- **Cluster Client**: Add explanation for why `MaxRetries` is disabled for `ClusterClient` ([#3551](https://github.com/redis/go-redis/pull/3551)) + +## 🧪 Testing & Infrastructure + +- **E2E Testing**: Upgrade E2E testing framework with improved reliability and coverage ([#3541](https://github.com/redis/go-redis/pull/3541)) +- **Release Process**: Improved resiliency of the release process ([#3530](https://github.com/redis/go-redis/pull/3530)) + +## 📦 Dependencies + +- Bump `rojopolis/spellcheck-github-actions` from 0.51.0 to 0.52.0 ([#3520](https://github.com/redis/go-redis/pull/3520)) +- Bump `github/codeql-action` from 3 to 4 ([#3544](https://github.com/redis/go-redis/pull/3544)) + +## 👥 Contributors + +We'd like to thank all the contributors who worked on this release! 
+ +[@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), [@Sovietaced](https://github.com/Sovietaced), [@Udhayarajan](https://github.com/Udhayarajan), [@boekkooi-impossiblecloud](https://github.com/boekkooi-impossiblecloud), [@Pika-Gopher](https://github.com/Pika-Gopher), [@cxljs](https://github.com/cxljs), [@huiyifyj](https://github.com/huiyifyj), [@omid-h70](https://github.com/omid-h70) + +--- + +**Full Changelog**: https://github.com/redis/go-redis/compare/v9.14.0...v9.16.0 + + +# 9.15.0 was accidentally released. Please use version 9.16.0 instead. + +# 9.15.0-beta.3 (2025-09-26) + +## Highlights +This beta release includes a pre-production version of processing push notifications and hitless upgrades. + +# Changes + +- chore: Update hash_commands.go ([#3523](https://github.com/redis/go-redis/pull/3523)) + +## 🚀 New Features + +- feat: RESP3 notifications support & Hitless notifications handling ([#3418](https://github.com/redis/go-redis/pull/3418)) + +## 🐛 Bug Fixes + +- fix: pipeline repeatedly sets the error ([#3525](https://github.com/redis/go-redis/pull/3525)) + +## 🧰 Maintenance + +- chore(deps): bump rojopolis/spellcheck-github-actions from 0.51.0 to 0.52.0 ([#3520](https://github.com/redis/go-redis/pull/3520)) +- feat(e2e-testing): maintnotifications e2e and refactor ([#3526](https://github.com/redis/go-redis/pull/3526)) +- feat(tag.sh): Improved resiliency of the release process ([#3530](https://github.com/redis/go-redis/pull/3530)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@cxljs](https://github.com/cxljs), [@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), and [@omid-h70](https://github.com/omid-h70) + + +# 9.15.0-beta.1 (2025-09-10) + +## Highlights +This beta release includes a pre-production version of processing push notifications and hitless upgrades. 
+ +### Hitless Upgrades +Hitless upgrades is a major new feature that allows for zero-downtime upgrades in Redis clusters. +You can find more information in the [Hitless Upgrades documentation](https://github.com/redis/go-redis/tree/master/hitless). + +# Changes + +## 🚀 New Features +- [CAE-1088] & [CAE-1072] feat: RESP3 notifications support & Hitless notifications handling ([#3418](https://github.com/redis/go-redis/pull/3418)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), [@ofekshenawa](https://github.com/ofekshenawa) + + +# 9.14.0 (2025-09-10) + +## Highlights +- Added batch process method to the pipeline ([#3510](https://github.com/redis/go-redis/pull/3510)) + +# Changes + +## 🚀 New Features + +- Added batch process method to the pipeline ([#3510](https://github.com/redis/go-redis/pull/3510)) + +## 🐛 Bug Fixes + +- fix: SetErr on Cmd if the command cannot be queued correctly in multi/exec ([#3509](https://github.com/redis/go-redis/pull/3509)) + +## 🧰 Maintenance + +- Updates release drafter config to exclude dependabot ([#3511](https://github.com/redis/go-redis/pull/3511)) +- chore(deps): bump actions/setup-go from 5 to 6 ([#3504](https://github.com/redis/go-redis/pull/3504)) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+
+[@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis) and [@ndyakov](https://github.com/ndyakov)
+
+
+# 9.13.0 (2025-09-03)
+
+## Highlights
+- Pipeliner expose queued commands ([#3496](https://github.com/redis/go-redis/pull/3496))
+- Ensure that JSON.GET returns Nil response ([#3470](https://github.com/redis/go-redis/pull/3470))
+- Fixes on Read and Write buffer sizes and UniversalOptions
+
+## Changes
+- Pipeliner expose queued commands ([#3496](https://github.com/redis/go-redis/pull/3496))
+- fix(test): fix a timing issue in pubsub test ([#3498](https://github.com/redis/go-redis/pull/3498))
+- Allow users to enable read-write splitting in failover mode. ([#3482](https://github.com/redis/go-redis/pull/3482))
+- Set the read/write buffer size of the sentinel client to 4KiB ([#3476](https://github.com/redis/go-redis/pull/3476))
+
+## 🚀 New Features
+
+- fix(otel): register wait metrics ([#3499](https://github.com/redis/go-redis/pull/3499))
+- Support subscriptions against cluster slave nodes ([#3480](https://github.com/redis/go-redis/pull/3480))
+- Add wait metrics to otel ([#3493](https://github.com/redis/go-redis/pull/3493))
+- Clean failing timeout implementation ([#3472](https://github.com/redis/go-redis/pull/3472))
+
+## 🐛 Bug Fixes
+
+- Do not assume that all non-IP hosts are loopbacks ([#3085](https://github.com/redis/go-redis/pull/3085))
+- Ensure that JSON.GET returns Nil response ([#3470](https://github.com/redis/go-redis/pull/3470))
+
+## 🧰 Maintenance
+
+- fix(otel): register wait metrics ([#3499](https://github.com/redis/go-redis/pull/3499))
+- fix(make test): Add default env in makefile ([#3491](https://github.com/redis/go-redis/pull/3491))
+- Update the introduction to running tests in README.md ([#3495](https://github.com/redis/go-redis/pull/3495))
+- test: Add comprehensive edge case tests for IncrByFloat command ([#3477](https://github.com/redis/go-redis/pull/3477))
+- Set the default 
read/write buffer size of Redis connection to 32KiB ([#3483](https://github.com/redis/go-redis/pull/3483))
+- Bumps test image to 8.2.1-pre ([#3478](https://github.com/redis/go-redis/pull/3478))
+- fix UniversalOptions miss ReadBufferSize and WriteBufferSize options ([#3485](https://github.com/redis/go-redis/pull/3485))
+- chore(deps): bump actions/checkout from 4 to 5 ([#3484](https://github.com/redis/go-redis/pull/3484))
+- Removes dry run for stale issues policy ([#3471](https://github.com/redis/go-redis/pull/3471))
+- Update otel metrics URL ([#3474](https://github.com/redis/go-redis/pull/3474))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@LINKIWI](https://github.com/LINKIWI), [@cxljs](https://github.com/cxljs), [@cybersmeashish](https://github.com/cybersmeashish), [@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis), [@mwhooker](https://github.com/mwhooker), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@suever](https://github.com/suever)
+
+
+# 9.12.1 (2025-08-11)
+## 🚀 Highlights
+In the last version (9.12.0) the client introduced bigger write and read buffer sizes. The default value we set was 512KiB.
+However, users reported that this is too big for most use cases and can lead to high memory usage.
+In this version the default value is changed to 256KiB. The `README.md` was updated to reflect the
+correct default value and include a note that the default value can be changed.
+
+## 🐛 Bug Fixes
+
+- fix(options): Add buffer sizes to failover. Update README ([#3468](https://github.com/redis/go-redis/pull/3468))
+
+## 🧰 Maintenance
+
+- fix(options): Add buffer sizes to failover. 
Update README ([#3468](https://github.com/redis/go-redis/pull/3468)) +- chore: update & fix otel example ([#3466](https://github.com/redis/go-redis/pull/3466)) + +## Contributors +We'd like to thank all the contributors who worked on this release! + +[@ndyakov](https://github.com/ndyakov) and [@vmihailenco](https://github.com/vmihailenco) + +# 9.12.0 (2025-08-05) + +## 🚀 Highlights + +- This release includes support for [Redis 8.2](https://redis.io/docs/latest/operate/oss_and_stack/stack-with-enterprise/release-notes/redisce/redisos-8.2-release-notes/). +- Introduces an experimental Query Builders for `FTSearch`, `FTAggregate` and other search commands. +- Adds support for `EPSILON` option in `FT.VSIM`. +- Includes bug fixes and improvements contributed by the community related to ring and [redisotel](https://github.com/redis/go-redis/tree/master/extra/redisotel). + +## Changes +- Improve stale issue workflow ([#3458](https://github.com/redis/go-redis/pull/3458)) +- chore(ci): Add 8.2 rc2 pre build for CI ([#3459](https://github.com/redis/go-redis/pull/3459)) +- Added new stream commands ([#3450](https://github.com/redis/go-redis/pull/3450)) +- feat: Add "skip_verify" to Sentinel ([#3428](https://github.com/redis/go-redis/pull/3428)) +- fix: `errors.Join` requires Go 1.20 or later ([#3442](https://github.com/redis/go-redis/pull/3442)) +- DOC-4344 document quickstart examples ([#3426](https://github.com/redis/go-redis/pull/3426)) +- feat(bitop): add support for the new bitop operations ([#3409](https://github.com/redis/go-redis/pull/3409)) + +## 🚀 New Features + +- feat: recover addIdleConn may occur panic ([#2445](https://github.com/redis/go-redis/pull/2445)) +- feat(ring): specify custom health check func via HeartbeatFn option ([#2940](https://github.com/redis/go-redis/pull/2940)) +- Add Query Builder for RediSearch commands ([#3436](https://github.com/redis/go-redis/pull/3436)) +- add configurable buffer sizes for Redis connections 
([#3453](https://github.com/redis/go-redis/pull/3453)) +- Add VAMANA vector type to RediSearch ([#3449](https://github.com/redis/go-redis/pull/3449)) +- VSIM add `EPSILON` option ([#3454](https://github.com/redis/go-redis/pull/3454)) +- Add closing support to otel metrics instrumentation ([#3444](https://github.com/redis/go-redis/pull/3444)) + +## 🐛 Bug Fixes + +- fix(redisotel): fix buggy append in reportPoolStats ([#3122](https://github.com/redis/go-redis/pull/3122)) +- fix(search): return results even if doc is empty ([#3457](https://github.com/redis/go-redis/pull/3457)) +- [ISSUE-3402]: Ring.Pipelined return dial timeout error ([#3403](https://github.com/redis/go-redis/pull/3403)) + +## 🧰 Maintenance + +- Merges stale issues jobs into one job with two steps ([#3463](https://github.com/redis/go-redis/pull/3463)) +- improve code readability ([#3446](https://github.com/redis/go-redis/pull/3446)) +- chore(release): 9.12.0-beta.1 ([#3460](https://github.com/redis/go-redis/pull/3460)) +- DOC-5472 time series doc examples ([#3443](https://github.com/redis/go-redis/pull/3443)) +- Add VAMANA compression algorithm tests ([#3461](https://github.com/redis/go-redis/pull/3461)) +- bumped redis 8.2 version used in the CI/CD ([#3451](https://github.com/redis/go-redis/pull/3451)) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+ +[@andy-stark-redis](https://github.com/andy-stark-redis), [@cxljs](https://github.com/cxljs), [@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis), [@jouir](https://github.com/jouir), [@monkey92t](https://github.com/monkey92t), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@rokn](https://github.com/rokn), [@smnvdev](https://github.com/smnvdev), [@strobil](https://github.com/strobil) and [@wzy9607](https://github.com/wzy9607) + +## New Contributors +* [@htemelski-redis](https://github.com/htemelski-redis) made their first contribution in [#3409](https://github.com/redis/go-redis/pull/3409) +* [@smnvdev](https://github.com/smnvdev) made their first contribution in [#3403](https://github.com/redis/go-redis/pull/3403) +* [@rokn](https://github.com/rokn) made their first contribution in [#3444](https://github.com/redis/go-redis/pull/3444) + +# 9.11.0 (2025-06-24) + +## 🚀 Highlights + +Fixes TxPipeline to work correctly in cluster scenarios, allowing execution of commands +only in the same slot. 
+ +# Changes + +## 🚀 New Features + +- Set cluster slot for `scan` commands, rather than random ([#2623](https://github.com/redis/go-redis/pull/2623)) +- Add CredentialsProvider field to UniversalOptions ([#2927](https://github.com/redis/go-redis/pull/2927)) +- feat(redisotel): add WithCallerEnabled option ([#3415](https://github.com/redis/go-redis/pull/3415)) + +## 🐛 Bug Fixes + +- fix(txpipeline): keyless commands should take the slot of the keyed ([#3411](https://github.com/redis/go-redis/pull/3411)) +- fix(loading): cache the loaded flag for slave nodes ([#3410](https://github.com/redis/go-redis/pull/3410)) +- fix(txpipeline): should return error on multi/exec on multiple slots ([#3408](https://github.com/redis/go-redis/pull/3408)) +- fix: check if the shard exists to avoid returning nil ([#3396](https://github.com/redis/go-redis/pull/3396)) + +## 🧰 Maintenance + +- feat: optimize connection pool waitTurn ([#3412](https://github.com/redis/go-redis/pull/3412)) +- chore(ci): update CI redis builds ([#3407](https://github.com/redis/go-redis/pull/3407)) +- chore: remove a redundant method from `Ring`, `Client` and `ClusterClient` ([#3401](https://github.com/redis/go-redis/pull/3401)) +- test: refactor TestBasicCredentials using table-driven tests ([#3406](https://github.com/redis/go-redis/pull/3406)) +- perf: reduce unnecessary memory allocation operations ([#3399](https://github.com/redis/go-redis/pull/3399)) +- fix: insert entry during iterating over a map ([#3398](https://github.com/redis/go-redis/pull/3398)) +- DOC-5229 probabilistic data type examples ([#3413](https://github.com/redis/go-redis/pull/3413)) +- chore(deps): bump rojopolis/spellcheck-github-actions from 0.49.0 to 0.51.0 ([#3414](https://github.com/redis/go-redis/pull/3414)) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+
+[@andy-stark-redis](https://github.com/andy-stark-redis), [@boekkooi-impossiblecloud](https://github.com/boekkooi-impossiblecloud), [@cxljs](https://github.com/cxljs), [@dcherubini](https://github.com/dcherubini), [@dependabot[bot]](https://github.com/apps/dependabot), [@iamamirsalehi](https://github.com/iamamirsalehi), [@ndyakov](https://github.com/ndyakov), [@pete-woods](https://github.com/pete-woods) and [@twz915](https://github.com/twz915)
+
+# 9.10.0 (2025-06-06)
+
+## 🚀 Highlights
+
+`go-redis` now supports [vector sets](https://redis.io/docs/latest/develop/data-types/vector-sets/). This data type is marked
+as "in preview" in Redis and its support in `go-redis` is marked as experimental. You can find examples in the documentation and
+in the `doctests` folder.
+
+# Changes
+
+## 🚀 New Features
+
+- feat: support vectorset ([#3375](https://github.com/redis/go-redis/pull/3375))
+
+## 🧰 Maintenance
+
+- Add the missing NewFloatSliceResult for testing ([#3393](https://github.com/redis/go-redis/pull/3393))
+- DOC-5078 vector set examples ([#3394](https://github.com/redis/go-redis/pull/3394))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+ +[@AndBobsYourUncle](https://github.com/AndBobsYourUncle), [@andy-stark-redis](https://github.com/andy-stark-redis), [@fukua95](https://github.com/fukua95) and [@ndyakov](https://github.com/ndyakov) + + + +# 9.9.0 (2025-05-27) + +## 🚀 Highlights +- **Token-based Authentication**: Added `StreamingCredentialsProvider` for dynamic credential updates (experimental) + - Can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) for Azure AD authentication +- **Connection Statistics**: Added connection waiting statistics for better monitoring +- **Failover Improvements**: Added `ParseFailoverURL` for easier failover configuration +- **Ring Client Enhancements**: Added shard access methods for better Pub/Sub management + +## ✨ New Features +- Added `StreamingCredentialsProvider` for token-based authentication ([#3320](https://github.com/redis/go-redis/pull/3320)) + - Supports dynamic credential updates + - Includes connection close hooks + - Note: Currently marked as experimental +- Added `ParseFailoverURL` for parsing failover URLs ([#3362](https://github.com/redis/go-redis/pull/3362)) +- Added connection waiting statistics ([#2804](https://github.com/redis/go-redis/pull/2804)) +- Added new utility functions: + - `ParseFloat` and `MustParseFloat` in public utils package ([#3371](https://github.com/redis/go-redis/pull/3371)) + - Unit tests for `Atoi`, `ParseInt`, `ParseUint`, and `ParseFloat` ([#3377](https://github.com/redis/go-redis/pull/3377)) +- Added Ring client shard access methods: + - `GetShardClients()` to retrieve all active shard clients + - `GetShardClientForKey(key string)` to get the shard client for a specific key ([#3388](https://github.com/redis/go-redis/pull/3388)) + +## 🐛 Bug Fixes +- Fixed routing reads to loading slave nodes ([#3370](https://github.com/redis/go-redis/pull/3370)) +- Added support for nil lag in XINFO GROUPS ([#3369](https://github.com/redis/go-redis/pull/3369)) +- Fixed pool acquisition timeout issues 
([#3381](https://github.com/redis/go-redis/pull/3381)) +- Optimized unnecessary copy operations ([#3376](https://github.com/redis/go-redis/pull/3376)) + +## 📚 Documentation +- Updated documentation for XINFO GROUPS with nil lag support ([#3369](https://github.com/redis/go-redis/pull/3369)) +- Added package-level comments for new features + +## ⚡ Performance and Reliability +- Optimized `ReplaceSpaces` function ([#3383](https://github.com/redis/go-redis/pull/3383)) +- Set default value for `Options.Protocol` in `init()` ([#3387](https://github.com/redis/go-redis/pull/3387)) +- Exported pool errors for public consumption ([#3380](https://github.com/redis/go-redis/pull/3380)) + +## 🔧 Dependencies and Infrastructure +- Updated Redis CI to version 8.0.1 ([#3372](https://github.com/redis/go-redis/pull/3372)) +- Updated spellcheck GitHub Actions ([#3389](https://github.com/redis/go-redis/pull/3389)) +- Removed unused parameters ([#3382](https://github.com/redis/go-redis/pull/3382), [#3384](https://github.com/redis/go-redis/pull/3384)) + +## 🧪 Testing +- Added unit tests for pool acquisition timeout ([#3381](https://github.com/redis/go-redis/pull/3381)) +- Added unit tests for utility functions ([#3377](https://github.com/redis/go-redis/pull/3377)) + +## 👥 Contributors + +We would like to thank all the contributors who made this release possible: + +[@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@LINKIWI](https://github.com/LINKIWI), [@iamamirsalehi](https://github.com/iamamirsalehi), [@fukua95](https://github.com/fukua95), [@lzakharov](https://github.com/lzakharov), [@DengY11](https://github.com/DengY11) + +## 📝 Changelog + +For a complete list of changes, see the [full changelog](https://github.com/redis/go-redis/compare/v9.8.0...v9.9.0). 
+ +# 9.8.0 (2025-04-30) + +## 🚀 Highlights +- **Redis 8 Support**: Full compatibility with Redis 8.0, including testing and CI integration +- **Enhanced Hash Operations**: Added support for new hash commands (`HGETDEL`, `HGETEX`, `HSETEX`) and `HSTRLEN` command +- **Search Improvements**: Enabled Search DIALECT 2 by default and added `CountOnly` argument for `FT.Search` + +## ✨ New Features +- Added support for new hash commands: `HGETDEL`, `HGETEX`, `HSETEX` ([#3305](https://github.com/redis/go-redis/pull/3305)) +- Added `HSTRLEN` command for hash operations ([#2843](https://github.com/redis/go-redis/pull/2843)) +- Added `Do` method for raw query by single connection from `pool.Conn()` ([#3182](https://github.com/redis/go-redis/pull/3182)) +- Prevent false-positive marshaling by treating zero time.Time as empty in isEmptyValue ([#3273](https://github.com/redis/go-redis/pull/3273)) +- Added FailoverClusterClient support for Universal client ([#2794](https://github.com/redis/go-redis/pull/2794)) +- Added support for cluster mode with `IsClusterMode` config parameter ([#3255](https://github.com/redis/go-redis/pull/3255)) +- Added client name support in `HELLO` RESP handshake ([#3294](https://github.com/redis/go-redis/pull/3294)) +- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) +- Added read-only option for failover configurations ([#3281](https://github.com/redis/go-redis/pull/3281)) +- Added `CountOnly` argument for `FT.Search` to use `LIMIT 0 0` ([#3338](https://github.com/redis/go-redis/pull/3338)) +- Added `DB` option support in `NewFailoverClusterClient` ([#3342](https://github.com/redis/go-redis/pull/3342)) +- Added `nil` check for the options when creating a client ([#3363](https://github.com/redis/go-redis/pull/3363)) + +## 🐛 Bug Fixes +- Fixed `PubSub` concurrency safety issues ([#3360](https://github.com/redis/go-redis/pull/3360)) +- Fixed panic caused when argument is `nil` 
([#3353](https://github.com/redis/go-redis/pull/3353)) +- Improved error handling when fetching master node from sentinels ([#3349](https://github.com/redis/go-redis/pull/3349)) +- Fixed connection pool timeout issues and increased retries ([#3298](https://github.com/redis/go-redis/pull/3298)) +- Fixed context cancellation error leading to connection spikes on Primary instances ([#3190](https://github.com/redis/go-redis/pull/3190)) +- Fixed RedisCluster client to consider `MASTERDOWN` a retriable error ([#3164](https://github.com/redis/go-redis/pull/3164)) +- Fixed tracing to show complete commands instead of truncated versions ([#3290](https://github.com/redis/go-redis/pull/3290)) +- Fixed OpenTelemetry instrumentation to prevent multiple span reporting ([#3168](https://github.com/redis/go-redis/pull/3168)) +- Fixed `FT.Search` Limit argument and added `CountOnly` argument for limit 0 0 ([#3338](https://github.com/redis/go-redis/pull/3338)) +- Fixed missing command in interface ([#3344](https://github.com/redis/go-redis/pull/3344)) +- Fixed slot calculation for `COUNTKEYSINSLOT` command ([#3327](https://github.com/redis/go-redis/pull/3327)) +- Updated PubSub implementation with correct context ([#3329](https://github.com/redis/go-redis/pull/3329)) + +## 📚 Documentation +- Added hash search examples ([#3357](https://github.com/redis/go-redis/pull/3357)) +- Fixed documentation comments ([#3351](https://github.com/redis/go-redis/pull/3351)) +- Added `CountOnly` search example ([#3345](https://github.com/redis/go-redis/pull/3345)) +- Added examples for list commands: `LLEN`, `LPOP`, `LPUSH`, `LRANGE`, `RPOP`, `RPUSH` ([#3234](https://github.com/redis/go-redis/pull/3234)) +- Added `SADD` and `SMEMBERS` command examples ([#3242](https://github.com/redis/go-redis/pull/3242)) +- Updated `README.md` to use Redis Discord guild ([#3331](https://github.com/redis/go-redis/pull/3331)) +- Updated `HExpire` command documentation 
([#3355](https://github.com/redis/go-redis/pull/3355)) +- Featured OpenTelemetry instrumentation more prominently ([#3316](https://github.com/redis/go-redis/pull/3316)) +- Updated `README.md` with additional information ([#310ce55](https://github.com/redis/go-redis/commit/310ce55)) + +## ⚡ Performance and Reliability +- Bound connection pool background dials to configured dial timeout ([#3089](https://github.com/redis/go-redis/pull/3089)) +- Ensured context isn't exhausted via concurrent query ([#3334](https://github.com/redis/go-redis/pull/3334)) + +## 🔧 Dependencies and Infrastructure +- Updated testing image to Redis 8.0-RC2 ([#3361](https://github.com/redis/go-redis/pull/3361)) +- Enabled CI for Redis CE 8.0 ([#3274](https://github.com/redis/go-redis/pull/3274)) +- Updated various dependencies: + - Bumped golangci/golangci-lint-action from 6.5.0 to 7.0.0 ([#3354](https://github.com/redis/go-redis/pull/3354)) + - Bumped rojopolis/spellcheck-github-actions ([#3336](https://github.com/redis/go-redis/pull/3336)) + - Bumped golang.org/x/net in example/otel ([#3308](https://github.com/redis/go-redis/pull/3308)) +- Migrated golangci-lint configuration to v2 format ([#3354](https://github.com/redis/go-redis/pull/3354)) + +## ⚠️ Breaking Changes +- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) +- Dropped RedisGears (Triggers and Functions) support ([#3321](https://github.com/redis/go-redis/pull/3321)) +- Dropped FT.PROFILE command that was never enabled ([#3323](https://github.com/redis/go-redis/pull/3323)) + +## 🔒 Security +- Fixed network error handling on SETINFO (CVE-2025-29923) ([#3295](https://github.com/redis/go-redis/pull/3295)) + +## 🧪 Testing +- Added integration tests for Redis 8 behavior changes in Redis Search ([#3337](https://github.com/redis/go-redis/pull/3337)) +- Added vector types INT8 and UINT8 tests ([#3299](https://github.com/redis/go-redis/pull/3299)) +- Added test codes for search_commands.go 
([#3285](https://github.com/redis/go-redis/pull/3285)) +- Fixed example test sorting ([#3292](https://github.com/redis/go-redis/pull/3292)) + +## 👥 Contributors + +We would like to thank all the contributors who made this release possible: + +[@alexander-menshchikov](https://github.com/alexander-menshchikov), [@EXPEbdodla](https://github.com/EXPEbdodla), [@afti](https://github.com/afti), [@dmaier-redislabs](https://github.com/dmaier-redislabs), [@four_leaf_clover](https://github.com/four_leaf_clover), [@alohaglenn](https://github.com/alohaglenn), [@gh73962](https://github.com/gh73962), [@justinmir](https://github.com/justinmir), [@LINKIWI](https://github.com/LINKIWI), [@liushuangbill](https://github.com/liushuangbill), [@golang88](https://github.com/golang88), [@gnpaone](https://github.com/gnpaone), [@ndyakov](https://github.com/ndyakov), [@nikolaydubina](https://github.com/nikolaydubina), [@oleglacto](https://github.com/oleglacto), [@andy-stark-redis](https://github.com/andy-stark-redis), [@rodneyosodo](https://github.com/rodneyosodo), [@dependabot](https://github.com/dependabot), [@rfyiamcool](https://github.com/rfyiamcool), [@frankxjkuang](https://github.com/frankxjkuang), [@fukua95](https://github.com/fukua95), [@soleymani-milad](https://github.com/soleymani-milad), [@ofekshenawa](https://github.com/ofekshenawa), [@khasanovbi](https://github.com/khasanovbi) + + +# Old Changelog +## Unreleased + +### Changed + +* `go-redis` won't skip span creation if the parent spans is not recording. ([#2980](https://github.com/redis/go-redis/issues/2980)) + Users can use the OpenTelemetry sampler to control the sampling behavior. + For instance, you can use the `ParentBased(NeverSample())` sampler from `go.opentelemetry.io/otel/sdk/trace` to keep + a similar behavior (drop orphan spans) of `go-redis` as before. 
+ +## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) + + +### Features + +* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) +* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) +* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) + + + +## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) + + +### Bug Fixes + +* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) + + +### Features + +* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) +* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) +* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) + + + +## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) + +### New Features + +- feat(scan): scan time.Time sets the default decoding (#2413) +- Add support for CLUSTER LINKS command (#2504) +- Add support for acl dryrun command (#2502) +- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) +- Add support for LCS Command (#2480) +- Add support for BZMPOP (#2456) +- Adding support for ZMPOP command 
(#2408) +- Add support for LMPOP (#2440) +- feat: remove pool unused fields (#2438) +- Expiretime and PExpireTime (#2426) +- Implement `FUNCTION` group of commands (#2475) +- feat(zadd): add ZAddLT and ZAddGT (#2429) +- Add: Support for COMMAND LIST command (#2491) +- Add support for BLMPOP (#2442) +- feat: check pipeline.Do to prevent confusion with Exec (#2517) +- Function stats, function kill, fcall and fcall_ro (#2486) +- feat: Add support for CLUSTER SHARDS command (#2507) +- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) + +### Fixed + +- fix: eval api cmd.SetFirstKeyPos (#2501) +- fix: limit the number of connections created (#2441) +- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) +- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) +- fix: group lag can be null (#2448) + +### Maintenance + +- Updating to the latest version of redis (#2508) +- Allowing for running tests on a port other than the fixed 6380 (#2466) +- redis 7.0.8 in tests (#2450) +- docs: Update redisotel example for v9 (#2425) +- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) +- chore: add Chinese translation (#2436) +- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421) +- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420) +- chore(deps): bump actions/setup-go from 3 to 4 (#2495) +- docs: add instructions for the HSet api (#2503) +- docs: add reading lag field comment (#2451) +- test: update go mod before testing(go mod tidy) (#2423) +- docs: fix comment typo (#2505) +- test: remove testify (#2463) +- refactor: change ListElementCmd to KeyValuesCmd. (#2443) +- fix(appendArg): appendArg case special type (#2489) + +## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01) + +### Features + +* upgrade OpenTelemetry, use the new metrics API. 
([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65)) + +## v9 2023-01-30 + +### Breaking + +- Changed Pipelines to not be thread-safe any more. + +### Added + +- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was + contributed by @monkey92t who has done the majority of work in this release. +- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts + and deadlines. See + [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details. +- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example, + `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`. +- Added metrics instrumentation using `redisotel.IstrumentMetrics`. See + [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html) +- Added `redis.HasErrorPrefix` to help working with errors. + +### Changed + +- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is + completely gone in v9. +- Reworked hook interface and added `DialHook`. +- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See + [example](example/otel) and + [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html). +- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making + an allocation. +- Renamed the option `MaxConnAge` to `ConnMaxLifetime`. +- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`. +- Removed connection reaper in favor of `MaxIdleConns`. +- Removed `WithContext` since `context.Context` can be passed directly as an arg. +- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and + it can be safely reused via `sync.Pool` etc. 
`Pipeline.Discard` is still available if you want to + reset commands for some reason. + +### Fixed + +- Improved and fixed pipeline retries. +- As usually, added support for more commands and fixed some bugs. diff --git a/vendor/github.com/redis/go-redis/v9/acl_commands.go b/vendor/github.com/redis/go-redis/v9/acl_commands.go index 06847be2..0a8a195c 100644 --- a/vendor/github.com/redis/go-redis/v9/acl_commands.go +++ b/vendor/github.com/redis/go-redis/v9/acl_commands.go @@ -4,8 +4,24 @@ import "context" type ACLCmdable interface { ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd + ACLLog(ctx context.Context, count int64) *ACLLogCmd ACLLogReset(ctx context.Context) *StatusCmd + + ACLGenPass(ctx context.Context, bit int) *StringCmd + + ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd + ACLDelUser(ctx context.Context, username string) *IntCmd + ACLUsers(ctx context.Context) *StringSliceCmd + ACLWhoAmI(ctx context.Context) *StringCmd + ACLList(ctx context.Context) *StringSliceCmd + + ACLCat(ctx context.Context) *StringSliceCmd + ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd +} + +type ACLCatArgs struct { + Category string } func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd { @@ -33,3 +49,68 @@ func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd { _ = c(ctx, cmd) return cmd } + +func (c cmdable) ACLDelUser(ctx context.Context, username string) *IntCmd { + cmd := NewIntCmd(ctx, "acl", "deluser", username) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd { + args := make([]interface{}, 3+len(rules)) + args[0] = "acl" + args[1] = "setuser" + args[2] = username + for i, rule := range rules { + args[i+3] = rule + } + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLGenPass(ctx context.Context, bit int) *StringCmd { + args := make([]interface{}, 0, 3) + args = append(args, "acl", "genpass") + if bit > 0 { + args = append(args, bit) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLUsers(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "acl", "users") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLWhoAmI(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "acl", "whoami") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLList(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "acl", "list") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLCat(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "acl", "cat") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd { + // if there is a category passed, build new cmd, if there isn't - use the ACLCat method + if options != nil && options.Category != "" { + cmd := NewStringSliceCmd(ctx, "acl", "cat", options.Category) + _ = c(ctx, cmd) + return cmd + } + + return c.ACLCat(ctx) +} diff --git a/vendor/github.com/redis/go-redis/v9/adapters.go b/vendor/github.com/redis/go-redis/v9/adapters.go new file mode 100644 index 00000000..952a4c26 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/adapters.go @@ -0,0 +1,118 @@ +package redis + +import ( + "context" + "errors" + "net" + "time" + + "github.com/redis/go-redis/v9/internal/interfaces" + "github.com/redis/go-redis/v9/push" +) + +// ErrInvalidCommand is returned when an invalid command is passed to ExecuteCommand. +var ErrInvalidCommand = errors.New("invalid command type") + +// ErrInvalidPool is returned when the pool type is not supported. +var ErrInvalidPool = errors.New("invalid pool type") + +// newClientAdapter creates a new client adapter for regular Redis clients. 
+func newClientAdapter(client *baseClient) interfaces.ClientInterface { + return &clientAdapter{client: client} +} + +// clientAdapter adapts a Redis client to implement interfaces.ClientInterface. +type clientAdapter struct { + client *baseClient +} + +// GetOptions returns the client options. +func (ca *clientAdapter) GetOptions() interfaces.OptionsInterface { + return &optionsAdapter{options: ca.client.opt} +} + +// GetPushProcessor returns the client's push notification processor. +func (ca *clientAdapter) GetPushProcessor() interfaces.NotificationProcessor { + return &pushProcessorAdapter{processor: ca.client.pushProcessor} +} + +// optionsAdapter adapts Redis options to implement interfaces.OptionsInterface. +type optionsAdapter struct { + options *Options +} + +// GetReadTimeout returns the read timeout. +func (oa *optionsAdapter) GetReadTimeout() time.Duration { + return oa.options.ReadTimeout +} + +// GetWriteTimeout returns the write timeout. +func (oa *optionsAdapter) GetWriteTimeout() time.Duration { + return oa.options.WriteTimeout +} + +// GetNetwork returns the network type. +func (oa *optionsAdapter) GetNetwork() string { + return oa.options.Network +} + +// GetAddr returns the connection address. +func (oa *optionsAdapter) GetAddr() string { + return oa.options.Addr +} + +// GetNodeAddress returns the address of the Redis node as reported by the server. +// For cluster clients, this is the endpoint from CLUSTER SLOTS before any transformation. +// For standalone clients, this defaults to Addr. +func (oa *optionsAdapter) GetNodeAddress() string { + return oa.options.NodeAddress +} + +// IsTLSEnabled returns true if TLS is enabled. +func (oa *optionsAdapter) IsTLSEnabled() bool { + return oa.options.TLSConfig != nil +} + +// GetProtocol returns the protocol version. +func (oa *optionsAdapter) GetProtocol() int { + return oa.options.Protocol +} + +// GetPoolSize returns the connection pool size. 
+func (oa *optionsAdapter) GetPoolSize() int { + return oa.options.PoolSize +} + +// NewDialer returns a new dialer function for the connection. +func (oa *optionsAdapter) NewDialer() func(context.Context) (net.Conn, error) { + baseDialer := oa.options.NewDialer() + return func(ctx context.Context) (net.Conn, error) { + // Extract network and address from the options + network := oa.options.Network + addr := oa.options.Addr + return baseDialer(ctx, network, addr) + } +} + +// pushProcessorAdapter adapts a push.NotificationProcessor to implement interfaces.NotificationProcessor. +type pushProcessorAdapter struct { + processor push.NotificationProcessor +} + +// RegisterHandler registers a handler for a specific push notification name. +func (ppa *pushProcessorAdapter) RegisterHandler(pushNotificationName string, handler interface{}, protected bool) error { + if pushHandler, ok := handler.(push.NotificationHandler); ok { + return ppa.processor.RegisterHandler(pushNotificationName, pushHandler, protected) + } + return errors.New("handler must implement push.NotificationHandler") +} + +// UnregisterHandler removes a handler for a specific push notification name. +func (ppa *pushProcessorAdapter) UnregisterHandler(pushNotificationName string) error { + return ppa.processor.UnregisterHandler(pushNotificationName) +} + +// GetHandler returns the handler for a specific push notification name. +func (ppa *pushProcessorAdapter) GetHandler(pushNotificationName string) interface{} { + return ppa.processor.GetHandler(pushNotificationName) +} diff --git a/vendor/github.com/redis/go-redis/v9/auth/auth.go b/vendor/github.com/redis/go-redis/v9/auth/auth.go new file mode 100644 index 00000000..1f5c8022 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/auth/auth.go @@ -0,0 +1,61 @@ +// Package auth package provides authentication-related interfaces and types. +// It also includes a basic implementation of credentials using username and password. 
+package auth + +// StreamingCredentialsProvider is an interface that defines the methods for a streaming credentials provider. +// It is used to provide credentials for authentication. +// The CredentialsListener is used to receive updates when the credentials change. +type StreamingCredentialsProvider interface { + // Subscribe subscribes to the credentials provider for updates. + // It returns the current credentials, a cancel function to unsubscribe from the provider, + // and an error if any. + // TODO(ndyakov): Should we add context to the Subscribe method? + Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) +} + +// UnsubscribeFunc is a function that is used to cancel the subscription to the credentials provider. +// It is used to unsubscribe from the provider when the credentials are no longer needed. +type UnsubscribeFunc func() error + +// CredentialsListener is an interface that defines the methods for a credentials listener. +// It is used to receive updates when the credentials change. +// The OnNext method is called when the credentials change. +// The OnError method is called when an error occurs while requesting the credentials. +type CredentialsListener interface { + OnNext(credentials Credentials) + OnError(err error) +} + +// Credentials is an interface that defines the methods for credentials. +// It is used to provide the credentials for authentication. +type Credentials interface { + // BasicAuth returns the username and password for basic authentication. + BasicAuth() (username string, password string) + // RawCredentials returns the raw credentials as a string. + // This can be used to extract the username and password from the raw credentials or + // additional information if present in the token. + RawCredentials() string +} + +type basicAuth struct { + username string + password string +} + +// RawCredentials returns the raw credentials as a string. 
+func (b *basicAuth) RawCredentials() string { + return b.username + ":" + b.password +} + +// BasicAuth returns the username and password for basic authentication. +func (b *basicAuth) BasicAuth() (username string, password string) { + return b.username, b.password +} + +// NewBasicCredentials creates a new Credentials object from the given username and password. +func NewBasicCredentials(username, password string) Credentials { + return &basicAuth{ + username: username, + password: password, + } +} diff --git a/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go new file mode 100644 index 00000000..40076a0b --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go @@ -0,0 +1,47 @@ +package auth + +// ReAuthCredentialsListener is a struct that implements the CredentialsListener interface. +// It is used to re-authenticate the credentials when they are updated. +// It contains: +// - reAuth: a function that takes the new credentials and returns an error if any. +// - onErr: a function that takes an error and handles it. +type ReAuthCredentialsListener struct { + reAuth func(credentials Credentials) error + onErr func(err error) +} + +// OnNext is called when the credentials are updated. +// It calls the reAuth function with the new credentials. +// If the reAuth function returns an error, it calls the onErr function with the error. +func (c *ReAuthCredentialsListener) OnNext(credentials Credentials) { + if c.reAuth == nil { + return + } + + err := c.reAuth(credentials) + if err != nil { + c.OnError(err) + } +} + +// OnError is called when an error occurs. +// It can be called from both the credentials provider and the reAuth function. +func (c *ReAuthCredentialsListener) OnError(err error) { + if c.onErr == nil { + return + } + + c.onErr(err) +} + +// NewReAuthCredentialsListener creates a new ReAuthCredentialsListener. 
+// Implements the auth.CredentialsListener interface. +func NewReAuthCredentialsListener(reAuth func(credentials Credentials) error, onErr func(err error)) *ReAuthCredentialsListener { + return &ReAuthCredentialsListener{ + reAuth: reAuth, + onErr: onErr, + } +} + +// Ensure ReAuthCredentialsListener implements the CredentialsListener interface. +var _ CredentialsListener = (*ReAuthCredentialsListener)(nil) diff --git a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go index 550d7f52..86aa9b7e 100644 --- a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go +++ b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go @@ -2,6 +2,7 @@ package redis import ( "context" + "errors" ) type BitMapCmdable interface { @@ -11,10 +12,15 @@ type BitMapCmdable interface { BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpDiff(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpDiff1(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpAndOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOne(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpNot(ctx context.Context, destKey string, key string) *IntCmd BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd + BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd } func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { @@ -37,16 +43,26 @@ func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int type BitCount struct { Start, End int64 + Unit string // BYTE(default) | BIT } +const 
BitCountIndexByte string = "BYTE" +const BitCountIndexBit string = "BIT" + func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { - args := []interface{}{"bitcount", key} + args := make([]any, 2, 5) + args[0] = "bitcount" + args[1] = key if bitCount != nil { - args = append( - args, - bitCount.Start, - bitCount.End, - ) + args = append(args, bitCount.Start, bitCount.End) + if bitCount.Unit != "" { + if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit { + cmd := NewIntCmd(ctx) + cmd.SetErr(errors.New("redis: invalid bitcount index")) + return cmd + } + args = append(args, bitCount.Unit) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -66,22 +82,50 @@ func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) return cmd } +// BitOpAnd creates a new bitmap in which users are members of all given bitmaps func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { return c.bitOp(ctx, "and", destKey, keys...) } +// BitOpOr creates a new bitmap in which users are member of at least one given bitmap func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { return c.bitOp(ctx, "or", destKey, keys...) } +// BitOpXor creates a new bitmap in which users are the result of XORing all given bitmaps func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { return c.bitOp(ctx, "xor", destKey, keys...) } +// BitOpNot creates a new bitmap in which users are not members of a given bitmap func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { return c.bitOp(ctx, "not", destKey, key) } +// BitOpDiff creates a new bitmap in which users are members of bitmap X but not of any of bitmaps Y1, Y2, … +// Introduced with Redis 8.2 +func (c cmdable) BitOpDiff(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "diff", destKey, keys...) 
+} + +// BitOpDiff1 creates a new bitmap in which users are members of one or more of bitmaps Y1, Y2, … but not members of bitmap X +// Introduced with Redis 8.2 +func (c cmdable) BitOpDiff1(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "diff1", destKey, keys...) +} + +// BitOpAndOr creates a new bitmap in which users are members of bitmap X and also members of one or more of bitmaps Y1, Y2, … +// Introduced with Redis 8.2 +func (c cmdable) BitOpAndOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "andor", destKey, keys...) +} + +// BitOpOne creates a new bitmap in which users are members of exactly one of the given bitmaps +// Introduced with Redis 8.2 +func (c cmdable) BitOpOne(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "one", destKey, keys...) +} + // BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end // if you need the `byte | bit` parameter, please use `BitPosSpan`. func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { @@ -97,7 +141,9 @@ func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64 args[3] = pos[0] args[4] = pos[1] default: - panic("too many arguments") + cmd := NewIntCmd(ctx) + cmd.SetErr(errors.New("too many arguments")) + return cmd } cmd := NewIntCmd(ctx, args...) 
_ = c(ctx, cmd) @@ -138,7 +184,9 @@ func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface args[0] = "BITFIELD_RO" args[1] = key if len(values)%2 != 0 { - panic("BitFieldRO: invalid number of arguments, must be even") + c := NewIntSliceCmd(ctx) + c.SetErr(errors.New("BitFieldRO: invalid number of arguments, must be even")) + return c } for i := 0; i < len(values); i += 2 { args = append(args, "GET", values[i], values[i+1]) diff --git a/vendor/github.com/redis/go-redis/v9/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go index 0caf0977..a02683f2 100644 --- a/vendor/github.com/redis/go-redis/v9/cluster_commands.go +++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go @@ -4,6 +4,7 @@ import "context" type ClusterCmdable interface { ClusterMyShardID(ctx context.Context) *StringCmd + ClusterMyID(ctx context.Context) *StringCmd ClusterSlots(ctx context.Context) *ClusterSlotsCmd ClusterShards(ctx context.Context) *ClusterShardsCmd ClusterLinks(ctx context.Context) *ClusterLinksCmd @@ -35,6 +36,15 @@ func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd { return cmd } +func (c cmdable) ClusterMyID(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "cluster", "myid") + _ = c(ctx, cmd) + return cmd +} + +// ClusterSlots returns the mapping of cluster slots to nodes. +// +// Deprecated: Use ClusterShards instead as of Redis 7.0.0. func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd { cmd := NewClusterSlotsCmd(ctx, "cluster", "slots") _ = c(ctx, cmd) @@ -146,6 +156,9 @@ func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd { return cmd } +// ClusterSlaves lists the replica nodes of a master node. +// +// Deprecated: Use ClusterReplicas instead as of Redis 5.0.0. 
func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go index 06ed86ed..a2a2f051 100644 --- a/vendor/github.com/redis/go-redis/v9/command.go +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -4,6 +4,7 @@ import ( "bufio" "context" "fmt" + "maps" "net" "regexp" "strconv" @@ -14,9 +15,173 @@ import ( "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hscan" "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/routing" "github.com/redis/go-redis/v9/internal/util" ) +// keylessCommands contains Redis commands that have empty key specifications (9th slot empty) +// Only includes core Redis commands, excludes FT.*, ts.*, timeseries.*, search.* and subcommands +var keylessCommands = map[string]struct{}{ + "acl": {}, + "asking": {}, + "auth": {}, + "bgrewriteaof": {}, + "bgsave": {}, + "client": {}, + "cluster": {}, + "config": {}, + "debug": {}, + "discard": {}, + "echo": {}, + "exec": {}, + "failover": {}, + "function": {}, + "hello": {}, + "hotkeys": {}, + "latency": {}, + "lolwut": {}, + "module": {}, + "monitor": {}, + "multi": {}, + "pfselftest": {}, + "ping": {}, + "psubscribe": {}, + "psync": {}, + "publish": {}, + "pubsub": {}, + "punsubscribe": {}, + "quit": {}, + "readonly": {}, + "readwrite": {}, + "replconf": {}, + "replicaof": {}, + "role": {}, + "save": {}, + "script": {}, + "select": {}, + "shutdown": {}, + "slaveof": {}, + "slowlog": {}, + "subscribe": {}, + "swapdb": {}, + "sync": {}, + "unsubscribe": {}, + "unwatch": {}, + "wait": {}, +} + +// CmdTyper interface for getting command type +type CmdTyper interface { + GetCmdType() CmdType +} + +// CmdTypeGetter interface for getting command type without circular imports +type CmdTypeGetter interface { + GetCmdType() CmdType +} + 
+type CmdType uint8 + +const ( + CmdTypeGeneric CmdType = iota + CmdTypeString + CmdTypeInt + CmdTypeBool + CmdTypeFloat + CmdTypeStringSlice + CmdTypeIntSlice + CmdTypeFloatSlice + CmdTypeBoolSlice + CmdTypeMapStringString + CmdTypeMapStringInt + CmdTypeMapStringInterface + CmdTypeMapStringInterfaceSlice + CmdTypeSlice + CmdTypeStatus + CmdTypeDuration + CmdTypeTime + CmdTypeKeyValueSlice + CmdTypeStringStructMap + CmdTypeXMessageSlice + CmdTypeXStreamSlice + CmdTypeXPending + CmdTypeXPendingExt + CmdTypeXAutoClaim + CmdTypeXAutoClaimJustID + CmdTypeXInfoConsumers + CmdTypeXInfoGroups + CmdTypeXInfoStream + CmdTypeXInfoStreamFull + CmdTypeZSlice + CmdTypeZWithKey + CmdTypeScan + CmdTypeClusterSlots + CmdTypeGeoLocation + CmdTypeGeoSearchLocation + CmdTypeGeoPos + CmdTypeCommandsInfo + CmdTypeSlowLog + CmdTypeMapStringStringSlice + CmdTypeMapMapStringInterface + CmdTypeKeyValues + CmdTypeZSliceWithKey + CmdTypeFunctionList + CmdTypeFunctionStats + CmdTypeLCS + CmdTypeKeyFlags + CmdTypeClusterLinks + CmdTypeClusterShards + CmdTypeRankWithScore + CmdTypeClientInfo + CmdTypeACLLog + CmdTypeInfo + CmdTypeMonitor + CmdTypeJSON + CmdTypeJSONSlice + CmdTypeIntPointerSlice + CmdTypeScanDump + CmdTypeBFInfo + CmdTypeCFInfo + CmdTypeCMSInfo + CmdTypeTopKInfo + CmdTypeTDigestInfo + CmdTypeFTSynDump + CmdTypeAggregate + CmdTypeFTInfo + CmdTypeFTSpellCheck + CmdTypeFTSearch + CmdTypeTSTimestampValue + CmdTypeTSTimestampValueSlice + CmdTypeHotKeys +) + +type ( + CmdTypeXAutoClaimValue struct { + messages []XMessage + start string + } + + CmdTypeXAutoClaimJustIDValue struct { + ids []string + start string + } + + CmdTypeScanValue struct { + keys []string + cursor uint64 + } + + CmdTypeKeyValuesValue struct { + key string + values []string + } + + CmdTypeZSliceWithKeyValue struct { + key string + zSlice []Z + } +) + type Cmder interface { // command name. // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster". @@ -34,15 +199,23 @@ type Cmder interface { // e.g. 
"set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v". String() string + // Clone creates a copy of the command. + Clone() Cmder + stringArg(int) string firstKeyPos() int8 SetFirstKeyPos(int8) + stepCount() int8 + SetStepCount(int8) readTimeout() *time.Duration readReply(rd *proto.Reader) error - + readRawReply(rd *proto.Reader) error SetErr(error) Err() error + + // GetCmdType returns the command type for fast value extraction + GetCmdType() CmdType } func setCmdsErr(cmds []Cmder, e error) { @@ -75,12 +248,22 @@ func writeCmd(wr *proto.Writer, cmd Cmder) error { return wr.WriteArgs(cmd.Args()) } +// cmdFirstKeyPos returns the position of the first key in the command's arguments. +// If the command does not have a key, it returns 0. +// TODO: Use the data in CommandInfo to determine the first key position. func cmdFirstKeyPos(cmd Cmder) int { if pos := cmd.firstKeyPos(); pos != 0 { return int(pos) } - switch cmd.Name() { + name := cmd.Name() + + // first check if the command is keyless + if _, ok := keylessCommands[name]; ok { + return 0 + } + + switch name { case "eval", "evalsha", "eval_ro", "evalsha_ro": if cmd.stringArg(2) != "0" { return 3 @@ -122,12 +305,14 @@ func cmdString(cmd Cmder, val interface{}) string { //------------------------------------------------------------------------------ type baseCmd struct { - ctx context.Context - args []interface{} - err error - keyPos int8 - + ctx context.Context + args []interface{} + err error + keyPos int8 + _stepCount int8 + rawVal interface{} _readTimeout *time.Duration + cmdType CmdType } var _ Cmder = (*Cmd)(nil) @@ -167,6 +352,8 @@ func (cmd *baseCmd) stringArg(pos int) string { switch v := arg.(type) { case string: return v + case []byte: + return string(v) default: // TODO: consider using appendArg return fmt.Sprint(v) @@ -181,6 +368,14 @@ func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { cmd.keyPos = keyPos } +func (cmd *baseCmd) stepCount() int8 { + return cmd._stepCount +} + +func (cmd *baseCmd) 
SetStepCount(stepCount int8) { + cmd._stepCount = stepCount +} + func (cmd *baseCmd) SetErr(e error) { cmd.err = e } @@ -197,6 +392,38 @@ func (cmd *baseCmd) setReadTimeout(d time.Duration) { cmd._readTimeout = &d } +func (cmd *baseCmd) readRawReply(rd *proto.Reader) (err error) { + cmd.rawVal, err = rd.ReadReply() + return err +} + +func (cmd *baseCmd) GetCmdType() CmdType { + return cmd.cmdType +} + +func (cmd *baseCmd) cloneBaseCmd() baseCmd { + var readTimeout *time.Duration + if cmd._readTimeout != nil { + timeout := *cmd._readTimeout + readTimeout = &timeout + } + + // Create a copy of args slice + args := make([]interface{}, len(cmd.args)) + copy(args, cmd.args) + + return baseCmd{ + ctx: cmd.ctx, + args: args, + err: cmd.err, + keyPos: cmd.keyPos, + _stepCount: cmd._stepCount, + rawVal: cmd.rawVal, + _readTimeout: readTimeout, + cmdType: cmd.cmdType, + } +} + //------------------------------------------------------------------------------ type Cmd struct { @@ -208,8 +435,9 @@ type Cmd struct { func NewCmd(ctx context.Context, args ...interface{}) *Cmd { return &Cmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeGeneric, }, } } @@ -482,6 +710,13 @@ func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *Cmd) Clone() Cmder { + return &Cmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + //------------------------------------------------------------------------------ type SliceCmd struct { @@ -495,8 +730,9 @@ var _ Cmder = (*SliceCmd)(nil) func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { return &SliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeSlice, }, } } @@ -542,6 +778,18 @@ func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *SliceCmd) Clone() Cmder { + var val []interface{} + if cmd.val != nil { + val = make([]interface{}, len(cmd.val)) + copy(val, cmd.val) + } + return 
&SliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type StatusCmd struct { @@ -555,8 +803,9 @@ var _ Cmder = (*StatusCmd)(nil) func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { return &StatusCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeStatus, }, } } @@ -573,6 +822,10 @@ func (cmd *StatusCmd) Result() (string, error) { return cmd.val, cmd.err } +func (cmd *StatusCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + func (cmd *StatusCmd) String() string { return cmdString(cmd, cmd.val) } @@ -582,6 +835,13 @@ func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *StatusCmd) Clone() Cmder { + return &StatusCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + //------------------------------------------------------------------------------ type IntCmd struct { @@ -595,8 +855,9 @@ var _ Cmder = (*IntCmd)(nil) func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { return &IntCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeInt, }, } } @@ -626,6 +887,82 @@ func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *IntCmd) Clone() Cmder { + return &IntCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + +//------------------------------------------------------------------------------ + +// DigestCmd is a command that returns a uint64 xxh3 hash digest. +// +// This command is specifically designed for the Redis DIGEST command, +// which returns the xxh3 hash of a key's value as a hex string. +// The hex string is automatically parsed to a uint64 value. +// +// The digest can be used for optimistic locking with SetIFDEQ, SetIFDNE, +// and DelExArgs commands. 
+// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. See https://redis.io/commands/digest/ +type DigestCmd struct { + baseCmd + + val uint64 +} + +var _ Cmder = (*DigestCmd)(nil) + +func NewDigestCmd(ctx context.Context, args ...interface{}) *DigestCmd { + return &DigestCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *DigestCmd) SetVal(val uint64) { + cmd.val = val +} + +func (cmd *DigestCmd) Val() uint64 { + return cmd.val +} + +func (cmd *DigestCmd) Result() (uint64, error) { + return cmd.val, cmd.err +} + +func (cmd *DigestCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DigestCmd) Clone() Cmder { + return &DigestCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + +func (cmd *DigestCmd) readReply(rd *proto.Reader) (err error) { + // Redis DIGEST command returns a hex string (e.g., "a1b2c3d4e5f67890") + // We parse it as a uint64 xxh3 hash value + var hexStr string + hexStr, err = rd.ReadString() + if err != nil { + return err + } + + // Parse hex string to uint64 + cmd.val, err = strconv.ParseUint(hexStr, 16, 64) + return err +} + //------------------------------------------------------------------------------ type IntSliceCmd struct { @@ -639,8 +976,9 @@ var _ Cmder = (*IntSliceCmd)(nil) func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { return &IntSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeIntSlice, }, } } @@ -675,6 +1013,18 @@ func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *IntSliceCmd) Clone() Cmder { + var val []int64 + if cmd.val != nil { + val = make([]int64, len(cmd.val)) + copy(val, cmd.val) + } + return &IntSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type DurationCmd struct { @@ -689,8 +1039,9 @@ 
var _ Cmder = (*DurationCmd)(nil) func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { return &DurationCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeDuration, }, precision: precision, } @@ -728,6 +1079,14 @@ func (cmd *DurationCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *DurationCmd) Clone() Cmder { + return &DurationCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + precision: cmd.precision, + } +} + //------------------------------------------------------------------------------ type TimeCmd struct { @@ -741,8 +1100,9 @@ var _ Cmder = (*TimeCmd)(nil) func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { return &TimeCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeTime, }, } } @@ -779,6 +1139,13 @@ func (cmd *TimeCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *TimeCmd) Clone() Cmder { + return &TimeCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + //------------------------------------------------------------------------------ type BoolCmd struct { @@ -792,8 +1159,9 @@ var _ Cmder = (*BoolCmd)(nil) func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { return &BoolCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeBool, }, } } @@ -826,6 +1194,13 @@ func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *BoolCmd) Clone() Cmder { + return &BoolCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + //------------------------------------------------------------------------------ type StringCmd struct { @@ -839,8 +1214,9 @@ var _ Cmder = (*StringCmd)(nil) func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { return &StringCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeString, }, } } @@ -872,28 +1248,28 @@ func 
(cmd *StringCmd) Int() (int, error) { if cmd.err != nil { return 0, cmd.err } - return strconv.Atoi(cmd.Val()) + return strconv.Atoi(cmd.val) } func (cmd *StringCmd) Int64() (int64, error) { if cmd.err != nil { return 0, cmd.err } - return strconv.ParseInt(cmd.Val(), 10, 64) + return strconv.ParseInt(cmd.val, 10, 64) } func (cmd *StringCmd) Uint64() (uint64, error) { if cmd.err != nil { return 0, cmd.err } - return strconv.ParseUint(cmd.Val(), 10, 64) + return strconv.ParseUint(cmd.val, 10, 64) } func (cmd *StringCmd) Float32() (float32, error) { if cmd.err != nil { return 0, cmd.err } - f, err := strconv.ParseFloat(cmd.Val(), 32) + f, err := strconv.ParseFloat(cmd.val, 32) if err != nil { return 0, err } @@ -904,14 +1280,14 @@ func (cmd *StringCmd) Float64() (float64, error) { if cmd.err != nil { return 0, cmd.err } - return strconv.ParseFloat(cmd.Val(), 64) + return strconv.ParseFloat(cmd.val, 64) } func (cmd *StringCmd) Time() (time.Time, error) { if cmd.err != nil { return time.Time{}, cmd.err } - return time.Parse(time.RFC3339Nano, cmd.Val()) + return time.Parse(time.RFC3339Nano, cmd.val) } func (cmd *StringCmd) Scan(val interface{}) error { @@ -930,6 +1306,13 @@ func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *StringCmd) Clone() Cmder { + return &StringCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + //------------------------------------------------------------------------------ type FloatCmd struct { @@ -943,8 +1326,9 @@ var _ Cmder = (*FloatCmd)(nil) func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { return &FloatCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeFloat, }, } } @@ -970,6 +1354,13 @@ func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *FloatCmd) Clone() Cmder { + return &FloatCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + 
//------------------------------------------------------------------------------ type FloatSliceCmd struct { @@ -983,8 +1374,9 @@ var _ Cmder = (*FloatSliceCmd)(nil) func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { return &FloatSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeFloatSlice, }, } } @@ -1025,6 +1417,18 @@ func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *FloatSliceCmd) Clone() Cmder { + var val []float64 + if cmd.val != nil { + val = make([]float64, len(cmd.val)) + copy(val, cmd.val) + } + return &FloatSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type StringSliceCmd struct { @@ -1038,8 +1442,9 @@ var _ Cmder = (*StringSliceCmd)(nil) func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { return &StringSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeStringSlice, }, } } @@ -1061,7 +1466,7 @@ func (cmd *StringSliceCmd) String() string { } func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { - return proto.ScanSlice(cmd.Val(), container) + return proto.ScanSlice(cmd.val, container) } func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { @@ -1083,6 +1488,18 @@ func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *StringSliceCmd) Clone() Cmder { + var val []string + if cmd.val != nil { + val = make([]string, len(cmd.val)) + copy(val, cmd.val) + } + return &StringSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type KeyValue struct { @@ -1101,8 +1518,9 @@ var _ Cmder = (*KeyValueSliceCmd)(nil) func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd { return &KeyValueSliceCmd{ baseCmd: baseCmd{ - 
ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeKeyValueSlice, }, } } @@ -1177,6 +1595,18 @@ func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl return nil } +func (cmd *KeyValueSliceCmd) Clone() Cmder { + var val []KeyValue + if cmd.val != nil { + val = make([]KeyValue, len(cmd.val)) + copy(val, cmd.val) + } + return &KeyValueSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type BoolSliceCmd struct { @@ -1190,8 +1620,9 @@ var _ Cmder = (*BoolSliceCmd)(nil) func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { return &BoolSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeBoolSlice, }, } } @@ -1226,6 +1657,18 @@ func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *BoolSliceCmd) Clone() Cmder { + var val []bool + if cmd.val != nil { + val = make([]bool, len(cmd.val)) + copy(val, cmd.val) + } + return &BoolSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type MapStringStringCmd struct { @@ -1239,8 +1682,9 @@ var _ Cmder = (*MapStringStringCmd)(nil) func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd { return &MapStringStringCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeMapStringString, }, } } @@ -1305,6 +1749,20 @@ func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *MapStringStringCmd) Clone() Cmder { + var val map[string]string + if cmd.val != nil { + val = make(map[string]string, len(cmd.val)) + for k, v := range cmd.val { + val[k] = v + } + } + return &MapStringStringCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type 
MapStringIntCmd struct { @@ -1318,8 +1776,9 @@ var _ Cmder = (*MapStringIntCmd)(nil) func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd { return &MapStringIntCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeMapStringInt, }, } } @@ -1362,6 +1821,20 @@ func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *MapStringIntCmd) Clone() Cmder { + var val map[string]int64 + if cmd.val != nil { + val = make(map[string]int64, len(cmd.val)) + for k, v := range cmd.val { + val[k] = v + } + } + return &MapStringIntCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // ------------------------------------------------------------------------------ type MapStringSliceInterfaceCmd struct { baseCmd @@ -1371,8 +1844,9 @@ type MapStringSliceInterfaceCmd struct { func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd { return &MapStringSliceInterfaceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeMapStringInterfaceSlice, }, } } @@ -1394,33 +1868,88 @@ func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} { } func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadMapLen() + readType, err := rd.PeekReplyType() if err != nil { return err } - cmd.val = make(map[string][]interface{}, n) - for i := 0; i < n; i++ { - k, err := rd.ReadString() + + cmd.val = make(map[string][]interface{}) + + switch readType { + case proto.RespMap: + n, err := rd.ReadMapLen() if err != nil { return err } - nn, err := rd.ReadArrayLen() - if err != nil { - return err - } - cmd.val[k] = make([]interface{}, nn) - for j := 0; j < nn; j++ { - value, err := rd.ReadReply() + for i := 0; i < n; i++ { + k, err := rd.ReadString() if err != nil { return err } - cmd.val[k][j] = value + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + 
cmd.val[k] = make([]interface{}, nn) + for j := 0; j < nn; j++ { + value, err := rd.ReadReply() + if err != nil { + return err + } + cmd.val[k][j] = value + } + } + case proto.RespArray: + // RESP2 response + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + for i := 0; i < n; i++ { + // Each entry in this array is itself an array with key details + itemLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + + key, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[key] = make([]interface{}, 0, itemLen-1) + for j := 1; j < itemLen; j++ { + // Read the inner array for timestamp-value pairs + data, err := rd.ReadReply() + if err != nil { + return err + } + cmd.val[key] = append(cmd.val[key], data) + } } } return nil } +func (cmd *MapStringSliceInterfaceCmd) Clone() Cmder { + var val map[string][]interface{} + if cmd.val != nil { + val = make(map[string][]interface{}, len(cmd.val)) + for k, v := range cmd.val { + if v != nil { + newSlice := make([]interface{}, len(v)) + copy(newSlice, v) + val[k] = newSlice + } + } + } + return &MapStringSliceInterfaceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type StringStructMapCmd struct { @@ -1434,8 +1963,9 @@ var _ Cmder = (*StringStructMapCmd)(nil) func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { return &StringStructMapCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeStringStructMap, }, } } @@ -1473,11 +2003,28 @@ func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *StringStructMapCmd) Clone() Cmder { + var val map[string]struct{} + if cmd.val != nil { + val = maps.Clone(cmd.val) + } + return &StringStructMapCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type XMessage struct { ID 
string Values map[string]interface{} + // MillisElapsedFromDelivery is the number of milliseconds since the entry was last delivered. + // Only populated when using XREADGROUP with CLAIM argument for claimed entries. + MillisElapsedFromDelivery int64 + // DeliveredCount is the number of times the entry was delivered. + // Only populated when using XREADGROUP with CLAIM argument for claimed entries. + DeliveredCount int64 } type XMessageSliceCmd struct { @@ -1491,8 +2038,9 @@ var _ Cmder = (*XMessageSliceCmd)(nil) func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { return &XMessageSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXMessageSlice, }, } } @@ -1518,6 +2066,28 @@ func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) { return err } +func (cmd *XMessageSliceCmd) Clone() Cmder { + var val []XMessage + if cmd.val != nil { + val = make([]XMessage, len(cmd.val)) + for i, msg := range cmd.val { + val[i] = XMessage{ + ID: msg.ID, + } + if msg.Values != nil { + val[i].Values = make(map[string]interface{}, len(msg.Values)) + for k, v := range msg.Values { + val[i].Values[k] = v + } + } + } + } + return &XMessageSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { n, err := rd.ReadArrayLen() if err != nil { @@ -1534,10 +2104,16 @@ func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { } func readXMessage(rd *proto.Reader) (XMessage, error) { - if err := rd.ReadFixedArrayLen(2); err != nil { + // Read array length can be 2 or 4 (with CLAIM metadata) + n, err := rd.ReadArrayLen() + if err != nil { return XMessage{}, err } + if n != 2 && n != 4 { + return XMessage{}, fmt.Errorf("redis: got %d elements in the XMessage array, expected 2 or 4", n) + } + id, err := rd.ReadString() if err != nil { return XMessage{}, err @@ -1550,10 +2126,24 @@ func readXMessage(rd *proto.Reader) (XMessage, error) { 
} } - return XMessage{ + msg := XMessage{ ID: id, Values: v, - }, nil + } + + if n == 4 { + msg.MillisElapsedFromDelivery, err = rd.ReadInt() + if err != nil { + return XMessage{}, err + } + + msg.DeliveredCount, err = rd.ReadInt() + if err != nil { + return XMessage{}, err + } + } + + return msg, nil } func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) { @@ -1597,8 +2187,9 @@ var _ Cmder = (*XStreamSliceCmd)(nil) func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { return &XStreamSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXStreamSlice, }, } } @@ -1651,6 +2242,36 @@ func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XStreamSliceCmd) Clone() Cmder { + var val []XStream + if cmd.val != nil { + val = make([]XStream, len(cmd.val)) + for i, stream := range cmd.val { + val[i] = XStream{ + Stream: stream.Stream, + } + if stream.Messages != nil { + val[i].Messages = make([]XMessage, len(stream.Messages)) + for j, msg := range stream.Messages { + val[i].Messages[j] = XMessage{ + ID: msg.ID, + } + if msg.Values != nil { + val[i].Messages[j].Values = make(map[string]interface{}, len(msg.Values)) + for k, v := range msg.Values { + val[i].Messages[j].Values[k] = v + } + } + } + } + } + } + return &XStreamSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type XPending struct { @@ -1670,8 +2291,9 @@ var _ Cmder = (*XPendingCmd)(nil) func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { return &XPendingCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXPending, }, } } @@ -1734,6 +2356,27 @@ func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XPendingCmd) Clone() Cmder { + var val *XPending + if cmd.val != nil { + val = &XPending{ + Count: 
cmd.val.Count, + Lower: cmd.val.Lower, + Higher: cmd.val.Higher, + } + if cmd.val.Consumers != nil { + val.Consumers = make(map[string]int64, len(cmd.val.Consumers)) + for k, v := range cmd.val.Consumers { + val.Consumers[k] = v + } + } + } + return &XPendingCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type XPendingExt struct { @@ -1753,8 +2396,9 @@ var _ Cmder = (*XPendingExtCmd)(nil) func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { return &XPendingExtCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXPendingExt, }, } } @@ -1809,6 +2453,18 @@ func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XPendingExtCmd) Clone() Cmder { + var val []XPendingExt + if cmd.val != nil { + val = make([]XPendingExt, len(cmd.val)) + copy(val, cmd.val) + } + return &XPendingExtCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type XAutoClaimCmd struct { @@ -1823,8 +2479,9 @@ var _ Cmder = (*XAutoClaimCmd)(nil) func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { return &XAutoClaimCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXAutoClaim, }, } } @@ -1879,6 +2536,29 @@ func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XAutoClaimCmd) Clone() Cmder { + var val []XMessage + if cmd.val != nil { + val = make([]XMessage, len(cmd.val)) + for i, msg := range cmd.val { + val[i] = XMessage{ + ID: msg.ID, + } + if msg.Values != nil { + val[i].Values = make(map[string]interface{}, len(msg.Values)) + for k, v := range msg.Values { + val[i].Values[k] = v + } + } + } + } + return &XAutoClaimCmd{ + baseCmd: cmd.cloneBaseCmd(), + start: cmd.start, + val: val, + } +} + 
//------------------------------------------------------------------------------ type XAutoClaimJustIDCmd struct { @@ -1893,8 +2573,9 @@ var _ Cmder = (*XAutoClaimJustIDCmd)(nil) func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { return &XAutoClaimJustIDCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXAutoClaimJustID, }, } } @@ -1957,6 +2638,19 @@ func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XAutoClaimJustIDCmd) Clone() Cmder { + var val []string + if cmd.val != nil { + val = make([]string, len(cmd.val)) + copy(val, cmd.val) + } + return &XAutoClaimJustIDCmd{ + baseCmd: cmd.cloneBaseCmd(), + start: cmd.start, + val: val, + } +} + //------------------------------------------------------------------------------ type XInfoConsumersCmd struct { @@ -1976,8 +2670,9 @@ var _ Cmder = (*XInfoConsumersCmd)(nil) func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { return &XInfoConsumersCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "consumers", stream, group}, + ctx: ctx, + args: []interface{}{"xinfo", "consumers", stream, group}, + cmdType: CmdTypeXInfoConsumers, }, } } @@ -2043,6 +2738,18 @@ func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XInfoConsumersCmd) Clone() Cmder { + var val []XInfoConsumer + if cmd.val != nil { + val = make([]XInfoConsumer, len(cmd.val)) + copy(val, cmd.val) + } + return &XInfoConsumersCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type XInfoGroupsCmd struct { @@ -2056,7 +2763,9 @@ type XInfoGroup struct { Pending int64 LastDeliveredID string EntriesRead int64 - Lag int64 + // Lag represents the number of pending messages in the stream not yet + // delivered to this consumer group. 
Returns -1 when the lag cannot be determined. + Lag int64 } var _ Cmder = (*XInfoGroupsCmd)(nil) @@ -2064,8 +2773,9 @@ var _ Cmder = (*XInfoGroupsCmd)(nil) func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { return &XInfoGroupsCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "groups", stream}, + ctx: ctx, + args: []interface{}{"xinfo", "groups", stream}, + cmdType: CmdTypeXInfoGroups, }, } } @@ -2139,8 +2849,11 @@ func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { // lag: the number of entries in the stream that are still waiting to be delivered // to the group's consumers, or a NULL(Nil) when that number can't be determined. + // In that case, we return -1. if err != nil && err != Nil { return err + } else if err == Nil { + group.Lag = -1 } default: return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) @@ -2151,6 +2864,18 @@ func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XInfoGroupsCmd) Clone() Cmder { + var val []XInfoGroup + if cmd.val != nil { + val = make([]XInfoGroup, len(cmd.val)) + copy(val, cmd.val) + } + return &XInfoGroupsCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type XInfoStreamCmd struct { @@ -2169,6 +2894,13 @@ type XInfoStream struct { FirstEntry XMessage LastEntry XMessage RecordedFirstEntryID string + + IDMPDuration int64 + IDMPMaxSize int64 + PIDsTracked int64 + IIDsTracked int64 + IIDsAdded int64 + IIDsDuplicates int64 } var _ Cmder = (*XInfoStreamCmd)(nil) @@ -2176,8 +2908,9 @@ var _ Cmder = (*XInfoStreamCmd)(nil) func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd { return &XInfoStreamCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "stream", stream}, + ctx: ctx, + args: []interface{}{"xinfo", "stream", stream}, + cmdType: CmdTypeXInfoStream, }, } } @@ -2261,6 +2994,36 @@ func (cmd *XInfoStreamCmd) 
readReply(rd *proto.Reader) error { if err != nil { return err } + case "idmp-duration": + cmd.val.IDMPDuration, err = rd.ReadInt() + if err != nil { + return err + } + case "idmp-maxsize": + cmd.val.IDMPMaxSize, err = rd.ReadInt() + if err != nil { + return err + } + case "pids-tracked": + cmd.val.PIDsTracked, err = rd.ReadInt() + if err != nil { + return err + } + case "iids-tracked": + cmd.val.IIDsTracked, err = rd.ReadInt() + if err != nil { + return err + } + case "iids-added": + cmd.val.IIDsAdded, err = rd.ReadInt() + if err != nil { + return err + } + case "iids-duplicates": + cmd.val.IIDsDuplicates, err = rd.ReadInt() + if err != nil { + return err + } default: return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key) } @@ -2268,6 +3031,45 @@ func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *XInfoStreamCmd) Clone() Cmder { + var val *XInfoStream + if cmd.val != nil { + val = &XInfoStream{ + Length: cmd.val.Length, + RadixTreeKeys: cmd.val.RadixTreeKeys, + RadixTreeNodes: cmd.val.RadixTreeNodes, + Groups: cmd.val.Groups, + LastGeneratedID: cmd.val.LastGeneratedID, + MaxDeletedEntryID: cmd.val.MaxDeletedEntryID, + EntriesAdded: cmd.val.EntriesAdded, + RecordedFirstEntryID: cmd.val.RecordedFirstEntryID, + } + // Clone XMessage fields + val.FirstEntry = XMessage{ + ID: cmd.val.FirstEntry.ID, + } + if cmd.val.FirstEntry.Values != nil { + val.FirstEntry.Values = make(map[string]interface{}, len(cmd.val.FirstEntry.Values)) + for k, v := range cmd.val.FirstEntry.Values { + val.FirstEntry.Values[k] = v + } + } + val.LastEntry = XMessage{ + ID: cmd.val.LastEntry.ID, + } + if cmd.val.LastEntry.Values != nil { + val.LastEntry.Values = make(map[string]interface{}, len(cmd.val.LastEntry.Values)) + for k, v := range cmd.val.LastEntry.Values { + val.LastEntry.Values[k] = v + } + } + } + return &XInfoStreamCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + 
//------------------------------------------------------------------------------ type XInfoStreamFullCmd struct { @@ -2285,6 +3087,12 @@ type XInfoStreamFull struct { Entries []XMessage Groups []XInfoStreamGroup RecordedFirstEntryID string + IDMPDuration int64 + IDMPMaxSize int64 + PIDsTracked int64 + IIDsTracked int64 + IIDsAdded int64 + IIDsDuplicates int64 } type XInfoStreamGroup struct { @@ -2323,8 +3131,9 @@ var _ Cmder = (*XInfoStreamFullCmd)(nil) func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { return &XInfoStreamFullCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeXInfoStreamFull, }, } } @@ -2405,6 +3214,36 @@ func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { if err != nil { return err } + case "idmp-duration": + cmd.val.IDMPDuration, err = rd.ReadInt() + if err != nil { + return err + } + case "idmp-maxsize": + cmd.val.IDMPMaxSize, err = rd.ReadInt() + if err != nil { + return err + } + case "pids-tracked": + cmd.val.PIDsTracked, err = rd.ReadInt() + if err != nil { + return err + } + case "iids-tracked": + cmd.val.IIDsTracked, err = rd.ReadInt() + if err != nil { + return err + } + case "iids-added": + cmd.val.IIDsAdded, err = rd.ReadInt() + if err != nil { + return err + } + case "iids-duplicates": + cmd.val.IIDsDuplicates, err = rd.ReadInt() + if err != nil { + return err + } default: return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) } @@ -2609,6 +3448,45 @@ func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { return consumers, nil } +func (cmd *XInfoStreamFullCmd) Clone() Cmder { + var val *XInfoStreamFull + if cmd.val != nil { + val = &XInfoStreamFull{ + Length: cmd.val.Length, + RadixTreeKeys: cmd.val.RadixTreeKeys, + RadixTreeNodes: cmd.val.RadixTreeNodes, + LastGeneratedID: cmd.val.LastGeneratedID, + MaxDeletedEntryID: cmd.val.MaxDeletedEntryID, + EntriesAdded: cmd.val.EntriesAdded, + 
RecordedFirstEntryID: cmd.val.RecordedFirstEntryID, + } + // Clone Entries + if cmd.val.Entries != nil { + val.Entries = make([]XMessage, len(cmd.val.Entries)) + for i, msg := range cmd.val.Entries { + val.Entries[i] = XMessage{ + ID: msg.ID, + } + if msg.Values != nil { + val.Entries[i].Values = make(map[string]interface{}, len(msg.Values)) + for k, v := range msg.Values { + val.Entries[i].Values[k] = v + } + } + } + } + // Clone Groups - simplified copy for now due to complexity + if cmd.val.Groups != nil { + val.Groups = make([]XInfoStreamGroup, len(cmd.val.Groups)) + copy(val.Groups, cmd.val.Groups) + } + } + return &XInfoStreamFullCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type ZSliceCmd struct { @@ -2622,8 +3500,9 @@ var _ Cmder = (*ZSliceCmd)(nil) func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { return &ZSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeZSlice, }, } } @@ -2687,6 +3566,18 @@ func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl return nil } +func (cmd *ZSliceCmd) Clone() Cmder { + var val []Z + if cmd.val != nil { + val = make([]Z, len(cmd.val)) + copy(val, cmd.val) + } + return &ZSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type ZWithKeyCmd struct { @@ -2700,8 +3591,9 @@ var _ Cmder = (*ZWithKeyCmd)(nil) func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { return &ZWithKeyCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeZWithKey, }, } } @@ -2741,6 +3633,23 @@ func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *ZWithKeyCmd) Clone() Cmder { + var val *ZWithKey + if cmd.val != nil { + val = &ZWithKey{ + Z: Z{ + Score: cmd.val.Score, + Member: cmd.val.Member, + 
}, + Key: cmd.val.Key, + } + } + return &ZWithKeyCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type ScanCmd struct { @@ -2757,8 +3666,9 @@ var _ Cmder = (*ScanCmd)(nil) func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { return &ScanCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeScan, }, process: process, } @@ -2806,6 +3716,20 @@ func (cmd *ScanCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *ScanCmd) Clone() Cmder { + var page []string + if cmd.page != nil { + page = make([]string, len(cmd.page)) + copy(page, cmd.page) + } + return &ScanCmd{ + baseCmd: cmd.cloneBaseCmd(), + page: page, + cursor: cmd.cursor, + process: cmd.process, + } +} + // Iterator creates a new ScanIterator. func (cmd *ScanCmd) Iterator() *ScanIterator { return &ScanIterator{ @@ -2838,8 +3762,9 @@ var _ Cmder = (*ClusterSlotsCmd)(nil) func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { return &ClusterSlotsCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeClusterSlots, }, } } @@ -2952,6 +3877,38 @@ func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *ClusterSlotsCmd) Clone() Cmder { + var val []ClusterSlot + if cmd.val != nil { + val = make([]ClusterSlot, len(cmd.val)) + for i, slot := range cmd.val { + val[i] = ClusterSlot{ + Start: slot.Start, + End: slot.End, + } + if slot.Nodes != nil { + val[i].Nodes = make([]ClusterNode, len(slot.Nodes)) + for j, node := range slot.Nodes { + val[i].Nodes[j] = ClusterNode{ + ID: node.ID, + Addr: node.Addr, + } + if node.NetworkingMetadata != nil { + val[i].Nodes[j].NetworkingMetadata = make(map[string]string, len(node.NetworkingMetadata)) + for k, v := range node.NetworkingMetadata { + val[i].Nodes[j].NetworkingMetadata[k] = v + } + } + } + } + } + } + return 
&ClusterSlotsCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ // GeoLocation is used with GeoAdd to add geospatial location. @@ -2991,8 +3948,9 @@ var _ Cmder = (*GeoLocationCmd)(nil) func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { return &GeoLocationCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: geoLocationArgs(q, args...), + ctx: ctx, + args: geoLocationArgs(q, args...), + cmdType: CmdTypeGeoLocation, }, q: q, } @@ -3100,6 +4058,34 @@ func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *GeoLocationCmd) Clone() Cmder { + var q *GeoRadiusQuery + if cmd.q != nil { + q = &GeoRadiusQuery{ + Radius: cmd.q.Radius, + Unit: cmd.q.Unit, + WithCoord: cmd.q.WithCoord, + WithDist: cmd.q.WithDist, + WithGeoHash: cmd.q.WithGeoHash, + Count: cmd.q.Count, + Sort: cmd.q.Sort, + Store: cmd.q.Store, + StoreDist: cmd.q.StoreDist, + withLen: cmd.q.withLen, + } + } + var locations []GeoLocation + if cmd.locations != nil { + locations = make([]GeoLocation, len(cmd.locations)) + copy(locations, cmd.locations) + } + return &GeoLocationCmd{ + baseCmd: cmd.cloneBaseCmd(), + q: q, + locations: locations, + } +} + //------------------------------------------------------------------------------ // GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. 
@@ -3207,8 +4193,9 @@ func NewGeoSearchLocationCmd( ) *GeoSearchLocationCmd { return &GeoSearchLocationCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: geoSearchLocationArgs(opt, args), + cmdType: CmdTypeGeoSearchLocation, }, opt: opt, } @@ -3281,6 +4268,40 @@ func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *GeoSearchLocationCmd) Clone() Cmder { + var opt *GeoSearchLocationQuery + if cmd.opt != nil { + opt = &GeoSearchLocationQuery{ + GeoSearchQuery: GeoSearchQuery{ + Member: cmd.opt.Member, + Longitude: cmd.opt.Longitude, + Latitude: cmd.opt.Latitude, + Radius: cmd.opt.Radius, + RadiusUnit: cmd.opt.RadiusUnit, + BoxWidth: cmd.opt.BoxWidth, + BoxHeight: cmd.opt.BoxHeight, + BoxUnit: cmd.opt.BoxUnit, + Sort: cmd.opt.Sort, + Count: cmd.opt.Count, + CountAny: cmd.opt.CountAny, + }, + WithCoord: cmd.opt.WithCoord, + WithDist: cmd.opt.WithDist, + WithHash: cmd.opt.WithHash, + } + } + var val []GeoLocation + if cmd.val != nil { + val = make([]GeoLocation, len(cmd.val)) + copy(val, cmd.val) + } + return &GeoSearchLocationCmd{ + baseCmd: cmd.cloneBaseCmd(), + opt: opt, + val: val, + } +} + //------------------------------------------------------------------------------ type GeoPos struct { @@ -3298,8 +4319,9 @@ var _ Cmder = (*GeoPosCmd)(nil) func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { return &GeoPosCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeGeoPos, }, } } @@ -3355,17 +4377,37 @@ func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *GeoPosCmd) Clone() Cmder { + var val []*GeoPos + if cmd.val != nil { + val = make([]*GeoPos, len(cmd.val)) + for i, pos := range cmd.val { + if pos != nil { + val[i] = &GeoPos{ + Longitude: pos.Longitude, + Latitude: pos.Latitude, + } + } + } + } + return &GeoPosCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + 
//------------------------------------------------------------------------------ type CommandInfo struct { - Name string - Arity int8 - Flags []string - ACLFlags []string - FirstKeyPos int8 - LastKeyPos int8 - StepCount int8 - ReadOnly bool + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool + CommandPolicy *routing.CommandPolicy } type CommandsInfoCmd struct { @@ -3379,8 +4421,9 @@ var _ Cmder = (*CommandsInfoCmd)(nil) func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { return &CommandsInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeCommandsInfo, }, } } @@ -3404,7 +4447,7 @@ func (cmd *CommandsInfoCmd) String() string { func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { const numArgRedis5 = 6 const numArgRedis6 = 7 - const numArgRedis7 = 10 + const numArgRedis7 = 10 // Also matches redis 8 n, err := rd.ReadArrayLen() if err != nil { @@ -3492,9 +4535,33 @@ func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { } if nn >= numArgRedis7 { - if err := rd.DiscardNext(); err != nil { + // The 8th argument is an array of tips. 
+ tipsLen, err := rd.ReadArrayLen() + if err != nil { return err } + + rawTips := make(map[string]string, tipsLen) + if cmdInfo.ReadOnly { + rawTips[routing.ReadOnlyCMD] = "" + } + for f := 0; f < tipsLen; f++ { + tip, err := rd.ReadString() + if err != nil { + return err + } + + k, v, ok := strings.Cut(tip, ":") + if !ok { + // Handle tips that don't have a colon (like "nondeterministic_output") + rawTips[tip] = "" + } else { + // Handle normal key:value tips + rawTips[k] = v + } + } + cmdInfo.CommandPolicy = parseCommandPolicies(rawTips, cmdInfo.FirstKeyPos) + if err := rd.DiscardNext(); err != nil { return err } @@ -3509,13 +4576,47 @@ func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *CommandsInfoCmd) Clone() Cmder { + var val map[string]*CommandInfo + if cmd.val != nil { + val = make(map[string]*CommandInfo, len(cmd.val)) + for k, v := range cmd.val { + if v != nil { + newInfo := &CommandInfo{ + Name: v.Name, + Arity: v.Arity, + FirstKeyPos: v.FirstKeyPos, + LastKeyPos: v.LastKeyPos, + StepCount: v.StepCount, + ReadOnly: v.ReadOnly, + CommandPolicy: v.CommandPolicy, // CommandPolicy can be shared as it's immutable + } + if v.Flags != nil { + newInfo.Flags = make([]string, len(v.Flags)) + copy(newInfo.Flags, v.Flags) + } + if v.ACLFlags != nil { + newInfo.ACLFlags = make([]string, len(v.ACLFlags)) + copy(newInfo.ACLFlags, v.ACLFlags) + } + val[k] = newInfo + } + } + } + return &CommandsInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type cmdsInfoCache struct { fn func(ctx context.Context) (map[string]*CommandInfo, error) - once internal.Once - cmds map[string]*CommandInfo + once internal.Once + refreshLock sync.Mutex + cmds map[string]*CommandInfo } func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { @@ -3525,26 +4626,66 @@ func newCmdsInfoCache(fn func(ctx context.Context) 
(map[string]*CommandInfo, err } func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { + c.refreshLock.Lock() + defer c.refreshLock.Unlock() + err := c.once.Do(func() error { cmds, err := c.fn(ctx) if err != nil { return err } + lowerCmds := make(map[string]*CommandInfo, len(cmds)) + // Extensions have cmd names in upper case. Convert them to lower case. for k, v := range cmds { - lower := internal.ToLower(k) - if lower != k { - cmds[lower] = v - } + lowerCmds[internal.ToLower(k)] = v } - c.cmds = cmds + c.cmds = lowerCmds return nil }) return c.cmds, err } +func (c *cmdsInfoCache) Refresh() { + c.refreshLock.Lock() + defer c.refreshLock.Unlock() + + c.once = internal.Once{} +} + +// ------------------------------------------------------------------------------ +const requestPolicy = "request_policy" +const responsePolicy = "response_policy" + +func parseCommandPolicies(commandInfoTips map[string]string, firstKeyPos int8) *routing.CommandPolicy { + req := routing.ReqDefault + resp := routing.RespDefaultKeyless + if firstKeyPos > 0 { + resp = routing.RespDefaultHashSlot + } + + tips := make(map[string]string, len(commandInfoTips)) + for k, v := range commandInfoTips { + if k == requestPolicy { + if p, err := routing.ParseRequestPolicy(v); err == nil { + req = p + } + continue + } + if k == responsePolicy { + if p, err := routing.ParseResponsePolicy(v); err == nil { + resp = p + } + continue + } + tips[k] = v + } + + return &routing.CommandPolicy{Request: req, Response: resp, Tips: tips} +} + //------------------------------------------------------------------------------ type SlowLog struct { @@ -3569,8 +4710,9 @@ var _ Cmder = (*SlowLogCmd)(nil) func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { return &SlowLogCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeSlowLog, }, } } @@ -3655,6 +4797,356 @@ func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { return nil } 
+func (cmd *SlowLogCmd) Clone() Cmder { + var val []SlowLog + if cmd.val != nil { + val = make([]SlowLog, len(cmd.val)) + for i, log := range cmd.val { + val[i] = SlowLog{ + ID: log.ID, + Time: log.Time, + Duration: log.Duration, + ClientAddr: log.ClientAddr, + ClientName: log.ClientName, + } + if log.Args != nil { + val[i].Args = make([]string, len(log.Args)) + copy(val[i].Args, log.Args) + } + } + } + return &SlowLogCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +//----------------------------------------------------------------------- + +type Latency struct { + Name string + Time time.Time + Latest time.Duration + Max time.Duration +} + +type LatencyCmd struct { + baseCmd + val []Latency +} + +var _ Cmder = (*LatencyCmd)(nil) + +func NewLatencyCmd(ctx context.Context, args ...interface{}) *LatencyCmd { + return &LatencyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *LatencyCmd) SetVal(val []Latency) { + cmd.val = val +} + +func (cmd *LatencyCmd) Val() []Latency { + return cmd.val +} + +func (cmd *LatencyCmd) Result() ([]Latency, error) { + return cmd.val, cmd.err +} + +func (cmd *LatencyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *LatencyCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]Latency, n) + for i := 0; i < len(cmd.val); i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn < 3 { + return fmt.Errorf("redis: got %d elements in latency get, expected at least 3", nn) + } + if cmd.val[i].Name, err = rd.ReadString(); err != nil { + return err + } + createdAt, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Time = time.Unix(createdAt, 0) + latest, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Latest = time.Duration(latest) * time.Millisecond + maximum, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Max = time.Duration(maximum) * 
time.Millisecond + } + return nil +} + +func (cmd *LatencyCmd) Clone() Cmder { + var val []Latency + if cmd.val != nil { + val = make([]Latency, len(cmd.val)) + copy(val, cmd.val) + } + return &LatencyCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +//----------------------------------------------------------------------- + +// HotKeysSlotRange represents a slot or slot range in the response. +// Single element slice = individual slot, two element slice = slot range [start, end]. +type HotKeysSlotRange []int64 + +// HotKeysKeyEntry represents a hot key entry with its metric value. +type HotKeysKeyEntry struct { + Key string + Value interface{} // Can be int64 or string +} + +// HotKeysResult represents the response data from HOTKEYS GET command. +// Field names match the Redis response format. +type HotKeysResult struct { + TrackingActive bool + SampleRatio uint8 + SelectedSlots []HotKeysSlotRange + SampledCommandsSelectedSlots time.Duration // Present when sample-ratio > 1 and selected-slots is not empty + AllCommandsSelectedSlots time.Duration // Present when selected-slots is not empty + AllCommandsAllSlots time.Duration + NetBytesSampledCommandsSelectedSlots int64 // Present when sample-ratio > 1 and selected-slots is not empty + NetBytesAllCommandsSelectedSlots int64 // Present when selected-slots is not empty + NetBytesAllCommandsAllSlots int64 + CollectionStartTime time.Time + CollectionDuration time.Duration + UsedCPUSys time.Duration + UsedCPUUser time.Duration + TotalNetBytes int64 + ByCPUTime []HotKeysKeyEntry + ByNetBytes []HotKeysKeyEntry +} + +type HotKeysCmd struct { + baseCmd + + val *HotKeysResult +} + +var _ Cmder = (*HotKeysCmd)(nil) + +func NewHotKeysCmd(ctx context.Context, args ...interface{}) *HotKeysCmd { + return &HotKeysCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + cmdType: CmdTypeHotKeys, + }, + } +} + +func (cmd *HotKeysCmd) SetVal(val *HotKeysResult) { + cmd.val = val +} + +func (cmd *HotKeysCmd) Val() *HotKeysResult { + 
return cmd.val +} + +func (cmd *HotKeysCmd) Result() (*HotKeysResult, error) { + return cmd.val, cmd.err +} + +func (cmd *HotKeysCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *HotKeysCmd) readReply(rd *proto.Reader) error { + // HOTKEYS GET response is wrapped in an array for aggregation support + arrayLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + + if arrayLen == 0 { + // Empty array means no tracking was started or after reset + cmd.val = nil + return nil + } + + // Read the first (and typically only) element which is a map + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + result := &HotKeysResult{} + data := make(map[string]interface{}, n) + + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + data[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + data[k] = err + continue + } + return err + } + data[k] = v + } + + if v, ok := data["tracking-active"].(int64); ok { + result.TrackingActive = v == 1 + } + if v, ok := data["sample-ratio"].(int64); ok { + result.SampleRatio = uint8(v) + } + if v, ok := data["selected-slots"].([]interface{}); ok { + result.SelectedSlots = make([]HotKeysSlotRange, 0, len(v)) + for _, slot := range v { + switch s := slot.(type) { + case int64: + // Single slot + result.SelectedSlots = append(result.SelectedSlots, HotKeysSlotRange{s}) + case []interface{}: + // Slot range + slotRange := make(HotKeysSlotRange, 0, len(s)) + for _, sr := range s { + if val, ok := sr.(int64); ok { + slotRange = append(slotRange, val) + } + } + result.SelectedSlots = append(result.SelectedSlots, slotRange) + } + } + } + if v, ok := data["sampled-commands-selected-slots-us"].(int64); ok { + result.SampledCommandsSelectedSlots = time.Duration(v) * time.Microsecond + } + if v, ok := data["all-commands-selected-slots-us"].(int64); ok { + result.AllCommandsSelectedSlots = 
time.Duration(v) * time.Microsecond + } + if v, ok := data["all-commands-all-slots-us"].(int64); ok { + result.AllCommandsAllSlots = time.Duration(v) * time.Microsecond + } + if v, ok := data["net-bytes-sampled-commands-selected-slots"].(int64); ok { + result.NetBytesSampledCommandsSelectedSlots = v + } + if v, ok := data["net-bytes-all-commands-selected-slots"].(int64); ok { + result.NetBytesAllCommandsSelectedSlots = v + } + if v, ok := data["net-bytes-all-commands-all-slots"].(int64); ok { + result.NetBytesAllCommandsAllSlots = v + } + if v, ok := data["collection-start-time-unix-ms"].(int64); ok { + result.CollectionStartTime = time.UnixMilli(v) + } + if v, ok := data["collection-duration-ms"].(int64); ok { + result.CollectionDuration = time.Duration(v) * time.Millisecond + } + if v, ok := data["used-cpu-sys-ms"].(int64); ok { + result.UsedCPUSys = time.Duration(v) * time.Millisecond + } + if v, ok := data["used-cpu-user-ms"].(int64); ok { + result.UsedCPUUser = time.Duration(v) * time.Millisecond + } + if v, ok := data["total-net-bytes"].(int64); ok { + result.TotalNetBytes = v + } + + if v, ok := data["by-cpu-time-us"].([]interface{}); ok { + result.ByCPUTime = parseHotKeysKeyEntries(v) + } + + if v, ok := data["by-net-bytes"].([]interface{}); ok { + result.ByNetBytes = parseHotKeysKeyEntries(v) + } + + cmd.val = result + return nil +} + +// parseHotKeysKeyEntries parses the key-value pairs from HOTKEYS GET response. 
+func parseHotKeysKeyEntries(v []interface{}) []HotKeysKeyEntry { + entries := make([]HotKeysKeyEntry, 0, len(v)/2) + for i := 0; i < len(v); i += 2 { + if i+1 < len(v) { + key, keyOk := v[i].(string) + if keyOk { + entries = append(entries, HotKeysKeyEntry{ + Key: key, + Value: v[i+1], // Can be int64 or string + }) + } + } + } + return entries +} + +func (cmd *HotKeysCmd) Clone() Cmder { + var val *HotKeysResult + if cmd.val != nil { + val = &HotKeysResult{ + TrackingActive: cmd.val.TrackingActive, + SampleRatio: cmd.val.SampleRatio, + SampledCommandsSelectedSlots: cmd.val.SampledCommandsSelectedSlots, + AllCommandsSelectedSlots: cmd.val.AllCommandsSelectedSlots, + AllCommandsAllSlots: cmd.val.AllCommandsAllSlots, + NetBytesSampledCommandsSelectedSlots: cmd.val.NetBytesSampledCommandsSelectedSlots, + NetBytesAllCommandsSelectedSlots: cmd.val.NetBytesAllCommandsSelectedSlots, + NetBytesAllCommandsAllSlots: cmd.val.NetBytesAllCommandsAllSlots, + CollectionStartTime: cmd.val.CollectionStartTime, + CollectionDuration: cmd.val.CollectionDuration, + UsedCPUSys: cmd.val.UsedCPUSys, + UsedCPUUser: cmd.val.UsedCPUUser, + TotalNetBytes: cmd.val.TotalNetBytes, + } + if cmd.val.SelectedSlots != nil { + val.SelectedSlots = make([]HotKeysSlotRange, len(cmd.val.SelectedSlots)) + for i, sr := range cmd.val.SelectedSlots { + val.SelectedSlots[i] = make(HotKeysSlotRange, len(sr)) + copy(val.SelectedSlots[i], sr) + } + } + if cmd.val.ByCPUTime != nil { + val.ByCPUTime = make([]HotKeysKeyEntry, len(cmd.val.ByCPUTime)) + copy(val.ByCPUTime, cmd.val.ByCPUTime) + } + if cmd.val.ByNetBytes != nil { + val.ByNetBytes = make([]HotKeysKeyEntry, len(cmd.val.ByNetBytes)) + copy(val.ByNetBytes, cmd.val.ByNetBytes) + } + } + return &HotKeysCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //----------------------------------------------------------------------- type MapStringInterfaceCmd struct { @@ -3668,8 +5160,9 @@ var _ Cmder = (*MapStringInterfaceCmd)(nil) func 
NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd { return &MapStringInterfaceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeMapStringInterface, }, } } @@ -3719,6 +5212,20 @@ func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *MapStringInterfaceCmd) Clone() Cmder { + var val map[string]interface{} + if cmd.val != nil { + val = make(map[string]interface{}, len(cmd.val)) + for k, v := range cmd.val { + val[k] = v + } + } + return &MapStringInterfaceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //----------------------------------------------------------------------- type MapStringStringSliceCmd struct { @@ -3732,8 +5239,9 @@ var _ Cmder = (*MapStringStringSliceCmd)(nil) func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd { return &MapStringStringSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeMapStringStringSlice, }, } } @@ -3783,6 +5291,118 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *MapStringStringSliceCmd) Clone() Cmder { + var val []map[string]string + if cmd.val != nil { + val = make([]map[string]string, len(cmd.val)) + for i, m := range cmd.val { + if m != nil { + val[i] = make(map[string]string, len(m)) + for k, v := range m { + val[i][k] = v + } + } + } + } + return &MapStringStringSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +// ----------------------------------------------------------------------- + +// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}. 
+type MapMapStringInterfaceCmd struct { + baseCmd + val map[string]interface{} +} + +func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd { + return &MapMapStringInterfaceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + cmdType: CmdTypeMapMapStringInterface, + }, + } +} + +func (cmd *MapMapStringInterfaceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} { + return cmd.val +} + +// readReply will try to parse the reply from the proto.Reader for both resp2 and resp3 +func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadReply() + if err != nil { + return err + } + resultMap := map[string]interface{}{} + + switch midResponse := data.(type) { + case map[interface{}]interface{}: // resp3 will return map + for k, v := range midResponse { + stringKey, ok := k.(string) + if !ok { + return fmt.Errorf("redis: invalid map key %#v", k) + } + resultMap[stringKey] = v + } + case []interface{}: // resp2 will return array of arrays + n := len(midResponse) + for i := 0; i < n; i++ { + finalArr, ok := midResponse[i].([]interface{}) // final array that we need to transform to map + if !ok { + return fmt.Errorf("redis: unexpected response %#v", data) + } + m := len(finalArr) + if m%2 != 0 { // since this should be map, keys should be even number + return fmt.Errorf("redis: unexpected response %#v", data) + } + + for j := 0; j < m; j += 2 { + stringKey, ok := finalArr[j].(string) // the first one + if !ok { + return fmt.Errorf("redis: invalid map key %#v", finalArr[i]) + } + resultMap[stringKey] = finalArr[j+1] // second one is value + } + } + default: + return fmt.Errorf("redis: unexpected response %#v", 
data) + } + + cmd.val = resultMap + return nil +} + +func (cmd *MapMapStringInterfaceCmd) Clone() Cmder { + var val map[string]interface{} + if cmd.val != nil { + val = make(map[string]interface{}, len(cmd.val)) + for k, v := range cmd.val { + val[k] = v + } + } + return &MapMapStringInterfaceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //----------------------------------------------------------------------- type MapStringInterfaceSliceCmd struct { @@ -3796,8 +5416,9 @@ var _ Cmder = (*MapStringInterfaceSliceCmd)(nil) func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd { return &MapStringInterfaceSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeMapStringInterfaceSlice, }, } } @@ -3848,6 +5469,25 @@ func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *MapStringInterfaceSliceCmd) Clone() Cmder { + var val []map[string]interface{} + if cmd.val != nil { + val = make([]map[string]interface{}, len(cmd.val)) + for i, m := range cmd.val { + if m != nil { + val[i] = make(map[string]interface{}, len(m)) + for k, v := range m { + val[i][k] = v + } + } + } + } + return &MapStringInterfaceSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ type KeyValuesCmd struct { @@ -3862,8 +5502,9 @@ var _ Cmder = (*KeyValuesCmd)(nil) func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd { return &KeyValuesCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeKeyValues, }, } } @@ -3910,6 +5551,19 @@ func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *KeyValuesCmd) Clone() Cmder { + var val []string + if cmd.val != nil { + val = make([]string, len(cmd.val)) + copy(val, cmd.val) + } + return &KeyValuesCmd{ + baseCmd: cmd.cloneBaseCmd(), + key: 
cmd.key, + val: val, + } +} + //------------------------------------------------------------------------------ type ZSliceWithKeyCmd struct { @@ -3924,8 +5578,9 @@ var _ Cmder = (*ZSliceWithKeyCmd)(nil) func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd { return &ZSliceWithKeyCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeZSliceWithKey, }, } } @@ -3993,6 +5648,19 @@ func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *ZSliceWithKeyCmd) Clone() Cmder { + var val []Z + if cmd.val != nil { + val = make([]Z, len(cmd.val)) + copy(val, cmd.val) + } + return &ZSliceWithKeyCmd{ + baseCmd: cmd.cloneBaseCmd(), + key: cmd.key, + val: val, + } +} + type Function struct { Name string Description string @@ -4017,8 +5685,9 @@ var _ Cmder = (*FunctionListCmd)(nil) func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd { return &FunctionListCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeFunctionList, }, } } @@ -4145,6 +5814,37 @@ func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) return functions, nil } +func (cmd *FunctionListCmd) Clone() Cmder { + var val []Library + if cmd.val != nil { + val = make([]Library, len(cmd.val)) + for i, lib := range cmd.val { + val[i] = Library{ + Name: lib.Name, + Engine: lib.Engine, + Code: lib.Code, + } + if lib.Functions != nil { + val[i].Functions = make([]Function, len(lib.Functions)) + for j, fn := range lib.Functions { + val[i].Functions[j] = Function{ + Name: fn.Name, + Description: fn.Description, + } + if fn.Flags != nil { + val[i].Functions[j].Flags = make([]string, len(fn.Flags)) + copy(val[i].Functions[j].Flags, fn.Flags) + } + } + } + } + } + return &FunctionListCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // FunctionStats contains information about the scripts currently executing on the server, and 
the available engines // - Engines: // Statistics about the engine like number of functions and number of libraries @@ -4198,8 +5898,9 @@ var _ Cmder = (*FunctionStatsCmd)(nil) func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd { return &FunctionStatsCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeFunctionStats, }, } } @@ -4370,6 +6071,34 @@ func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScri return runningScripts, len(runningScripts) > 0, nil } +func (cmd *FunctionStatsCmd) Clone() Cmder { + val := FunctionStats{ + isRunning: cmd.val.isRunning, + rs: cmd.val.rs, // RunningScript is a simple struct, can be copied directly + } + if cmd.val.Engines != nil { + val.Engines = make([]Engine, len(cmd.val.Engines)) + copy(val.Engines, cmd.val.Engines) + } + if cmd.val.allrs != nil { + val.allrs = make([]RunningScript, len(cmd.val.allrs)) + for i, rs := range cmd.val.allrs { + val.allrs[i] = RunningScript{ + Name: rs.Name, + Duration: rs.Duration, + } + if rs.Command != nil { + val.allrs[i].Command = make([]string, len(rs.Command)) + copy(val.allrs[i].Command, rs.Command) + } + } + } + return &FunctionStatsCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ // LCSQuery is a parameter used for the LCS command @@ -4433,8 +6162,9 @@ func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd { } } cmd.baseCmd = baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeLCS, } return cmd @@ -4546,6 +6276,25 @@ func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) { return pos, nil } +func (cmd *LCSCmd) Clone() Cmder { + var val *LCSMatch + if cmd.val != nil { + val = &LCSMatch{ + MatchString: cmd.val.MatchString, + Len: cmd.val.Len, + } + if cmd.val.Matches != nil { + val.Matches = make([]LCSMatchedPosition, len(cmd.val.Matches)) + 
copy(val.Matches, cmd.val.Matches) + } + } + return &LCSCmd{ + baseCmd: cmd.cloneBaseCmd(), + readType: cmd.readType, + val: val, + } +} + // ------------------------------------------------------------------------ type KeyFlags struct { @@ -4564,8 +6313,9 @@ var _ Cmder = (*KeyFlagsCmd)(nil) func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd { return &KeyFlagsCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeKeyFlags, }, } } @@ -4624,6 +6374,26 @@ func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *KeyFlagsCmd) Clone() Cmder { + var val []KeyFlags + if cmd.val != nil { + val = make([]KeyFlags, len(cmd.val)) + for i, kf := range cmd.val { + val[i] = KeyFlags{ + Key: kf.Key, + } + if kf.Flags != nil { + val[i].Flags = make([]string, len(kf.Flags)) + copy(val[i].Flags, kf.Flags) + } + } + } + return &KeyFlagsCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // --------------------------------------------------------------------------------------------------- type ClusterLink struct { @@ -4646,8 +6416,9 @@ var _ Cmder = (*ClusterLinksCmd)(nil) func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd { return &ClusterLinksCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeClusterLinks, }, } } @@ -4713,6 +6484,18 @@ func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *ClusterLinksCmd) Clone() Cmder { + var val []ClusterLink + if cmd.val != nil { + val = make([]ClusterLink, len(cmd.val)) + copy(val, cmd.val) + } + return &ClusterLinksCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // ------------------------------------------------------------------------------------------------------------------ type SlotRange struct { @@ -4748,8 +6531,9 @@ var _ Cmder = (*ClusterShardsCmd)(nil) func NewClusterShardsCmd(ctx context.Context, args ...interface{}) 
*ClusterShardsCmd { return &ClusterShardsCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeClusterShards, }, } } @@ -4863,6 +6647,28 @@ func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *ClusterShardsCmd) Clone() Cmder { + var val []ClusterShard + if cmd.val != nil { + val = make([]ClusterShard, len(cmd.val)) + for i, shard := range cmd.val { + val[i] = ClusterShard{} + if shard.Slots != nil { + val[i].Slots = make([]SlotRange, len(shard.Slots)) + copy(val[i].Slots, shard.Slots) + } + if shard.Nodes != nil { + val[i].Nodes = make([]Node, len(shard.Nodes)) + copy(val[i].Nodes, shard.Nodes) + } + } + } + return &ClusterShardsCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // ----------------------------------------- type RankScore struct { @@ -4881,8 +6687,9 @@ var _ Cmder = (*RankWithScoreCmd)(nil) func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd { return &RankWithScoreCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeRankWithScore, }, } } @@ -4923,6 +6730,13 @@ func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *RankWithScoreCmd) Clone() Cmder { + return &RankWithScoreCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // RankScore is a simple struct, can be copied directly + } +} + // -------------------------------------------------------------------------------------------------- // ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0) @@ -4997,6 +6811,7 @@ type ClientInfo struct { PSub int // number of pattern matching subscriptions SSub int // redis version 7.0.3, number of shard channel subscriptions Multi int // number of commands in a MULTI/EXEC context + Watch int // redis version 7.4 RC1, number of keys this client is currently watching. 
QueryBuf int // qbuf, query buffer length (0 means no query pending) QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) @@ -5007,6 +6822,10 @@ type ClientInfo struct { OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) OutputMemory int // omem, output buffer memory usage TotalMemory int // tot-mem, total memory consumed by this client in its various buffers + TotalNetIn int // tot-net-in, total network input + TotalNetOut int // tot-net-out, total network output + TotalCmds int // tot-cmds, total number of commands processed + IoThread int // io-thread id Events string // file descriptor events (see below) LastCmd string // cmd, last command played User string // the authenticated username of the client @@ -5027,8 +6846,9 @@ var _ Cmder = (*ClientInfoCmd)(nil) func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd { return &ClientInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeClientInfo, }, } } @@ -5149,6 +6969,8 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.SSub, err = strconv.Atoi(val) case "multi": info.Multi, err = strconv.Atoi(val) + case "watch": + info.Watch, err = strconv.Atoi(val) case "qbuf": info.QueryBuf, err = strconv.Atoi(val) case "qbuf-free": @@ -5169,6 +6991,12 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.OutputMemory, err = strconv.Atoi(val) case "tot-mem": info.TotalMemory, err = strconv.Atoi(val) + case "tot-net-in": + info.TotalNetIn, err = strconv.Atoi(val) + case "tot-net-out": + info.TotalNetOut, err = strconv.Atoi(val) + case "tot-cmds": + info.TotalCmds, err = strconv.Atoi(val) case "events": info.Events = val case "cmd": @@ -5183,6 +7011,8 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.LibName = val 
case "lib-ver": info.LibVer = val + case "io-thread": + info.IoThread, err = strconv.Atoi(val) default: return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) } @@ -5195,6 +7025,50 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { return info, nil } +func (cmd *ClientInfoCmd) Clone() Cmder { + var val *ClientInfo + if cmd.val != nil { + val = &ClientInfo{ + ID: cmd.val.ID, + Addr: cmd.val.Addr, + LAddr: cmd.val.LAddr, + FD: cmd.val.FD, + Name: cmd.val.Name, + Age: cmd.val.Age, + Idle: cmd.val.Idle, + Flags: cmd.val.Flags, + DB: cmd.val.DB, + Sub: cmd.val.Sub, + PSub: cmd.val.PSub, + SSub: cmd.val.SSub, + Multi: cmd.val.Multi, + Watch: cmd.val.Watch, + QueryBuf: cmd.val.QueryBuf, + QueryBufFree: cmd.val.QueryBufFree, + ArgvMem: cmd.val.ArgvMem, + MultiMem: cmd.val.MultiMem, + BufferSize: cmd.val.BufferSize, + BufferPeak: cmd.val.BufferPeak, + OutputBufferLength: cmd.val.OutputBufferLength, + OutputListLength: cmd.val.OutputListLength, + OutputMemory: cmd.val.OutputMemory, + TotalMemory: cmd.val.TotalMemory, + IoThread: cmd.val.IoThread, + Events: cmd.val.Events, + LastCmd: cmd.val.LastCmd, + User: cmd.val.User, + Redir: cmd.val.Redir, + Resp: cmd.val.Resp, + LibName: cmd.val.LibName, + LibVer: cmd.val.LibVer, + } + } + return &ClientInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // ------------------------------------------- type ACLLogEntry struct { @@ -5221,8 +7095,9 @@ var _ Cmder = (*ACLLogCmd)(nil) func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd { return &ACLLogCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeACLLog, }, } } @@ -5304,12 +7179,85 @@ func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *ACLLogCmd) Clone() Cmder { + var val []*ACLLogEntry + if cmd.val != nil { + val = make([]*ACLLogEntry, len(cmd.val)) + for i, entry := range cmd.val { + if entry != nil { + val[i] = &ACLLogEntry{ + Count: entry.Count, + 
Reason: entry.Reason, + Context: entry.Context, + Object: entry.Object, + Username: entry.Username, + AgeSeconds: entry.AgeSeconds, + EntryID: entry.EntryID, + TimestampCreated: entry.TimestampCreated, + TimestampLastUpdated: entry.TimestampLastUpdated, + } + // Clone ClientInfo if present + if entry.ClientInfo != nil { + val[i].ClientInfo = &ClientInfo{ + ID: entry.ClientInfo.ID, + Addr: entry.ClientInfo.Addr, + LAddr: entry.ClientInfo.LAddr, + FD: entry.ClientInfo.FD, + Name: entry.ClientInfo.Name, + Age: entry.ClientInfo.Age, + Idle: entry.ClientInfo.Idle, + Flags: entry.ClientInfo.Flags, + DB: entry.ClientInfo.DB, + Sub: entry.ClientInfo.Sub, + PSub: entry.ClientInfo.PSub, + SSub: entry.ClientInfo.SSub, + Multi: entry.ClientInfo.Multi, + Watch: entry.ClientInfo.Watch, + QueryBuf: entry.ClientInfo.QueryBuf, + QueryBufFree: entry.ClientInfo.QueryBufFree, + ArgvMem: entry.ClientInfo.ArgvMem, + MultiMem: entry.ClientInfo.MultiMem, + BufferSize: entry.ClientInfo.BufferSize, + BufferPeak: entry.ClientInfo.BufferPeak, + OutputBufferLength: entry.ClientInfo.OutputBufferLength, + OutputListLength: entry.ClientInfo.OutputListLength, + OutputMemory: entry.ClientInfo.OutputMemory, + TotalMemory: entry.ClientInfo.TotalMemory, + IoThread: entry.ClientInfo.IoThread, + Events: entry.ClientInfo.Events, + LastCmd: entry.ClientInfo.LastCmd, + User: entry.ClientInfo.User, + Redir: entry.ClientInfo.Redir, + Resp: entry.ClientInfo.Resp, + LibName: entry.ClientInfo.LibName, + LibVer: entry.ClientInfo.LibVer, + } + } + } + } + } + return &ACLLogCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // LibraryInfo holds the library info. type LibraryInfo struct { LibName *string LibVer *string } +// WithLibraryName returns a valid LibraryInfo with library name only. +func WithLibraryName(libName string) LibraryInfo { + return LibraryInfo{LibName: &libName} +} + +// WithLibraryVersion returns a valid LibraryInfo with library version only. 
+func WithLibraryVersion(libVer string) LibraryInfo { + return LibraryInfo{LibVer: &libVer} +} + // ------------------------------------------- type InfoCmd struct { @@ -5322,8 +7270,9 @@ var _ Cmder = (*InfoCmd)(nil) func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd { return &InfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeInfo, }, } } @@ -5352,8 +7301,6 @@ func (cmd *InfoCmd) readReply(rd *proto.Reader) error { section := "" scanner := bufio.NewScanner(strings.NewReader(val)) - moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) - for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "#") { @@ -5364,6 +7311,7 @@ func (cmd *InfoCmd) readReply(rd *proto.Reader) error { cmd.val[section] = make(map[string]string) } else if line != "" { if section == "Modules" { + moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) kv := moduleRe.FindStringSubmatch(line) if len(kv) == 3 { cmd.val[section][kv[1]] = kv[2] @@ -5390,6 +7338,25 @@ func (cmd *InfoCmd) Item(section, key string) string { } } +func (cmd *InfoCmd) Clone() Cmder { + var val map[string]map[string]string + if cmd.val != nil { + val = make(map[string]map[string]string, len(cmd.val)) + for section, sectionMap := range cmd.val { + if sectionMap != nil { + val[section] = make(map[string]string, len(sectionMap)) + for k, v := range sectionMap { + val[section][k] = v + } + } + } + } + return &InfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + type MonitorStatus int const ( @@ -5408,8 +7375,9 @@ type MonitorCmd struct { func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd { return &MonitorCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"monitor"}, + ctx: ctx, + args: []interface{}{"monitor"}, + cmdType: CmdTypeMonitor, }, ch: ch, status: monitorStatusIdle, @@ -5444,9 +7412,12 @@ func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) for { cmd.mu.Lock() st := cmd.status 
+ pk, _ := rd.Peek(1) cmd.mu.Unlock() - if pk, _ := rd.Peek(1); len(pk) != 0 && st == monitorStatusStart { + if len(pk) != 0 && st == monitorStatusStart { + cmd.mu.Lock() line, err := rd.ReadString() + cmd.mu.Unlock() if err != nil { return err } @@ -5471,3 +7442,586 @@ func (cmd *MonitorCmd) Stop() { defer cmd.mu.Unlock() cmd.status = monitorStatusStop } + +type VectorScoreSliceCmd struct { + baseCmd + + val []VectorScore +} + +var _ Cmder = (*VectorScoreSliceCmd)(nil) + +func NewVectorInfoSliceCmd(ctx context.Context, args ...any) *VectorScoreSliceCmd { + return &VectorScoreSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *VectorScoreSliceCmd) SetVal(val []VectorScore) { + cmd.val = val +} + +func (cmd *VectorScoreSliceCmd) Val() []VectorScore { + return cmd.val +} + +func (cmd *VectorScoreSliceCmd) Result() ([]VectorScore, error) { + return cmd.val, cmd.err +} + +func (cmd *VectorScoreSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *VectorScoreSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make([]VectorScore, n) + for i := 0; i < n; i++ { + name, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[i].Name = name + + score, err := rd.ReadFloat() + if err != nil { + return err + } + cmd.val[i].Score = score + } + + return nil +} + +func (cmd *VectorScoreSliceCmd) Clone() Cmder { + return &VectorScoreSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + } +} + +func (cmd *MonitorCmd) Clone() Cmder { + // MonitorCmd cannot be safely cloned due to channels and goroutines + // Return a new MonitorCmd with the same channel + return newMonitorCmd(cmd.ctx, cmd.ch) +} + +// ExtractCommandValue extracts the value from a command result using the fast enum-based approach +func ExtractCommandValue(cmd interface{}) (interface{}, error) { + // First try to get the command type using the interface + if cmdTypeGetter, ok := 
cmd.(CmdTypeGetter); ok { + cmdType := cmdTypeGetter.GetCmdType() + + // Use fast type-based extraction + switch cmdType { + case CmdTypeGeneric: + if genericCmd, ok := cmd.(interface { + Val() interface{} + Err() error + }); ok { + return genericCmd.Val(), genericCmd.Err() + } + case CmdTypeString: + if stringCmd, ok := cmd.(interface { + Val() string + Err() error + }); ok { + return stringCmd.Val(), stringCmd.Err() + } + case CmdTypeInt: + if intCmd, ok := cmd.(interface { + Val() int64 + Err() error + }); ok { + return intCmd.Val(), intCmd.Err() + } + case CmdTypeBool: + if boolCmd, ok := cmd.(interface { + Val() bool + Err() error + }); ok { + return boolCmd.Val(), boolCmd.Err() + } + case CmdTypeFloat: + if floatCmd, ok := cmd.(interface { + Val() float64 + Err() error + }); ok { + return floatCmd.Val(), floatCmd.Err() + } + case CmdTypeStatus: + if statusCmd, ok := cmd.(interface { + Val() string + Err() error + }); ok { + return statusCmd.Val(), statusCmd.Err() + } + case CmdTypeDuration: + if durationCmd, ok := cmd.(interface { + Val() time.Duration + Err() error + }); ok { + return durationCmd.Val(), durationCmd.Err() + } + case CmdTypeTime: + if timeCmd, ok := cmd.(interface { + Val() time.Time + Err() error + }); ok { + return timeCmd.Val(), timeCmd.Err() + } + case CmdTypeStringStructMap: + if structMapCmd, ok := cmd.(interface { + Val() map[string]struct{} + Err() error + }); ok { + return structMapCmd.Val(), structMapCmd.Err() + } + case CmdTypeXMessageSlice: + if xMessageSliceCmd, ok := cmd.(interface { + Val() []XMessage + Err() error + }); ok { + return xMessageSliceCmd.Val(), xMessageSliceCmd.Err() + } + case CmdTypeXStreamSlice: + if xStreamSliceCmd, ok := cmd.(interface { + Val() []XStream + Err() error + }); ok { + return xStreamSliceCmd.Val(), xStreamSliceCmd.Err() + } + case CmdTypeXPending: + if xPendingCmd, ok := cmd.(interface { + Val() *XPending + Err() error + }); ok { + return xPendingCmd.Val(), xPendingCmd.Err() + } + case 
CmdTypeXPendingExt: + if xPendingExtCmd, ok := cmd.(interface { + Val() []XPendingExt + Err() error + }); ok { + return xPendingExtCmd.Val(), xPendingExtCmd.Err() + } + case CmdTypeXAutoClaim: + if xAutoClaimCmd, ok := cmd.(interface { + Val() ([]XMessage, string) + Err() error + }); ok { + messages, start := xAutoClaimCmd.Val() + return CmdTypeXAutoClaimValue{messages: messages, start: start}, xAutoClaimCmd.Err() + } + case CmdTypeXAutoClaimJustID: + if xAutoClaimJustIDCmd, ok := cmd.(interface { + Val() ([]string, string) + Err() error + }); ok { + ids, start := xAutoClaimJustIDCmd.Val() + return CmdTypeXAutoClaimJustIDValue{ids: ids, start: start}, xAutoClaimJustIDCmd.Err() + } + case CmdTypeXInfoConsumers: + if xInfoConsumersCmd, ok := cmd.(interface { + Val() []XInfoConsumer + Err() error + }); ok { + return xInfoConsumersCmd.Val(), xInfoConsumersCmd.Err() + } + case CmdTypeXInfoGroups: + if xInfoGroupsCmd, ok := cmd.(interface { + Val() []XInfoGroup + Err() error + }); ok { + return xInfoGroupsCmd.Val(), xInfoGroupsCmd.Err() + } + case CmdTypeXInfoStream: + if xInfoStreamCmd, ok := cmd.(interface { + Val() *XInfoStream + Err() error + }); ok { + return xInfoStreamCmd.Val(), xInfoStreamCmd.Err() + } + case CmdTypeXInfoStreamFull: + if xInfoStreamFullCmd, ok := cmd.(interface { + Val() *XInfoStreamFull + Err() error + }); ok { + return xInfoStreamFullCmd.Val(), xInfoStreamFullCmd.Err() + } + case CmdTypeZSlice: + if zSliceCmd, ok := cmd.(interface { + Val() []Z + Err() error + }); ok { + return zSliceCmd.Val(), zSliceCmd.Err() + } + case CmdTypeZWithKey: + if zWithKeyCmd, ok := cmd.(interface { + Val() *ZWithKey + Err() error + }); ok { + return zWithKeyCmd.Val(), zWithKeyCmd.Err() + } + case CmdTypeScan: + if scanCmd, ok := cmd.(interface { + Val() ([]string, uint64) + Err() error + }); ok { + keys, cursor := scanCmd.Val() + return CmdTypeScanValue{keys: keys, cursor: cursor}, scanCmd.Err() + } + case CmdTypeClusterSlots: + if clusterSlotsCmd, ok := 
cmd.(interface { + Val() []ClusterSlot + Err() error + }); ok { + return clusterSlotsCmd.Val(), clusterSlotsCmd.Err() + } + case CmdTypeGeoLocation: + if geoLocationCmd, ok := cmd.(interface { + Val() []GeoLocation + Err() error + }); ok { + return geoLocationCmd.Val(), geoLocationCmd.Err() + } + case CmdTypeGeoSearchLocation: + if geoSearchLocationCmd, ok := cmd.(interface { + Val() []GeoLocation + Err() error + }); ok { + return geoSearchLocationCmd.Val(), geoSearchLocationCmd.Err() + } + case CmdTypeGeoPos: + if geoPosCmd, ok := cmd.(interface { + Val() []*GeoPos + Err() error + }); ok { + return geoPosCmd.Val(), geoPosCmd.Err() + } + case CmdTypeCommandsInfo: + if commandsInfoCmd, ok := cmd.(interface { + Val() map[string]*CommandInfo + Err() error + }); ok { + return commandsInfoCmd.Val(), commandsInfoCmd.Err() + } + case CmdTypeSlowLog: + if slowLogCmd, ok := cmd.(interface { + Val() []SlowLog + Err() error + }); ok { + return slowLogCmd.Val(), slowLogCmd.Err() + } + case CmdTypeHotKeys: + if hotKeysCmd, ok := cmd.(interface { + Val() *HotKeysResult + Err() error + }); ok { + return hotKeysCmd.Val(), hotKeysCmd.Err() + } + case CmdTypeKeyValues: + if keyValuesCmd, ok := cmd.(interface { + Val() (string, []string) + Err() error + }); ok { + key, values := keyValuesCmd.Val() + return CmdTypeKeyValuesValue{key: key, values: values}, keyValuesCmd.Err() + } + case CmdTypeZSliceWithKey: + if zSliceWithKeyCmd, ok := cmd.(interface { + Val() (string, []Z) + Err() error + }); ok { + key, zSlice := zSliceWithKeyCmd.Val() + return CmdTypeZSliceWithKeyValue{key: key, zSlice: zSlice}, zSliceWithKeyCmd.Err() + } + case CmdTypeFunctionList: + if functionListCmd, ok := cmd.(interface { + Val() []Library + Err() error + }); ok { + return functionListCmd.Val(), functionListCmd.Err() + } + case CmdTypeFunctionStats: + if functionStatsCmd, ok := cmd.(interface { + Val() FunctionStats + Err() error + }); ok { + return functionStatsCmd.Val(), functionStatsCmd.Err() + } + case 
CmdTypeLCS: + if lcsCmd, ok := cmd.(interface { + Val() *LCSMatch + Err() error + }); ok { + return lcsCmd.Val(), lcsCmd.Err() + } + case CmdTypeKeyFlags: + if keyFlagsCmd, ok := cmd.(interface { + Val() []KeyFlags + Err() error + }); ok { + return keyFlagsCmd.Val(), keyFlagsCmd.Err() + } + case CmdTypeClusterLinks: + if clusterLinksCmd, ok := cmd.(interface { + Val() []ClusterLink + Err() error + }); ok { + return clusterLinksCmd.Val(), clusterLinksCmd.Err() + } + case CmdTypeClusterShards: + if clusterShardsCmd, ok := cmd.(interface { + Val() []ClusterShard + Err() error + }); ok { + return clusterShardsCmd.Val(), clusterShardsCmd.Err() + } + case CmdTypeRankWithScore: + if rankWithScoreCmd, ok := cmd.(interface { + Val() RankScore + Err() error + }); ok { + return rankWithScoreCmd.Val(), rankWithScoreCmd.Err() + } + case CmdTypeClientInfo: + if clientInfoCmd, ok := cmd.(interface { + Val() *ClientInfo + Err() error + }); ok { + return clientInfoCmd.Val(), clientInfoCmd.Err() + } + case CmdTypeACLLog: + if aclLogCmd, ok := cmd.(interface { + Val() []*ACLLogEntry + Err() error + }); ok { + return aclLogCmd.Val(), aclLogCmd.Err() + } + case CmdTypeInfo: + if infoCmd, ok := cmd.(interface { + Val() string + Err() error + }); ok { + return infoCmd.Val(), infoCmd.Err() + } + case CmdTypeMonitor: + if monitorCmd, ok := cmd.(interface { + Val() string + Err() error + }); ok { + return monitorCmd.Val(), monitorCmd.Err() + } + case CmdTypeJSON: + if jsonCmd, ok := cmd.(interface { + Val() string + Err() error + }); ok { + return jsonCmd.Val(), jsonCmd.Err() + } + case CmdTypeJSONSlice: + if jsonSliceCmd, ok := cmd.(interface { + Val() []interface{} + Err() error + }); ok { + return jsonSliceCmd.Val(), jsonSliceCmd.Err() + } + case CmdTypeIntPointerSlice: + if intPointerSliceCmd, ok := cmd.(interface { + Val() []*int64 + Err() error + }); ok { + return intPointerSliceCmd.Val(), intPointerSliceCmd.Err() + } + case CmdTypeScanDump: + if scanDumpCmd, ok := cmd.(interface { + 
Val() ScanDump + Err() error + }); ok { + return scanDumpCmd.Val(), scanDumpCmd.Err() + } + case CmdTypeBFInfo: + if bfInfoCmd, ok := cmd.(interface { + Val() BFInfo + Err() error + }); ok { + return bfInfoCmd.Val(), bfInfoCmd.Err() + } + case CmdTypeCFInfo: + if cfInfoCmd, ok := cmd.(interface { + Val() CFInfo + Err() error + }); ok { + return cfInfoCmd.Val(), cfInfoCmd.Err() + } + case CmdTypeCMSInfo: + if cmsInfoCmd, ok := cmd.(interface { + Val() CMSInfo + Err() error + }); ok { + return cmsInfoCmd.Val(), cmsInfoCmd.Err() + } + case CmdTypeTopKInfo: + if topKInfoCmd, ok := cmd.(interface { + Val() TopKInfo + Err() error + }); ok { + return topKInfoCmd.Val(), topKInfoCmd.Err() + } + case CmdTypeTDigestInfo: + if tDigestInfoCmd, ok := cmd.(interface { + Val() TDigestInfo + Err() error + }); ok { + return tDigestInfoCmd.Val(), tDigestInfoCmd.Err() + } + case CmdTypeFTSearch: + if ftSearchCmd, ok := cmd.(interface { + Val() FTSearchResult + Err() error + }); ok { + return ftSearchCmd.Val(), ftSearchCmd.Err() + } + case CmdTypeFTInfo: + if ftInfoCmd, ok := cmd.(interface { + Val() FTInfoResult + Err() error + }); ok { + return ftInfoCmd.Val(), ftInfoCmd.Err() + } + case CmdTypeFTSpellCheck: + if ftSpellCheckCmd, ok := cmd.(interface { + Val() []SpellCheckResult + Err() error + }); ok { + return ftSpellCheckCmd.Val(), ftSpellCheckCmd.Err() + } + case CmdTypeFTSynDump: + if ftSynDumpCmd, ok := cmd.(interface { + Val() []FTSynDumpResult + Err() error + }); ok { + return ftSynDumpCmd.Val(), ftSynDumpCmd.Err() + } + case CmdTypeAggregate: + if aggregateCmd, ok := cmd.(interface { + Val() *FTAggregateResult + Err() error + }); ok { + return aggregateCmd.Val(), aggregateCmd.Err() + } + case CmdTypeTSTimestampValue: + if tsTimestampValueCmd, ok := cmd.(interface { + Val() TSTimestampValue + Err() error + }); ok { + return tsTimestampValueCmd.Val(), tsTimestampValueCmd.Err() + } + case CmdTypeTSTimestampValueSlice: + if tsTimestampValueSliceCmd, ok := cmd.(interface { + 
Val() []TSTimestampValue + Err() error + }); ok { + return tsTimestampValueSliceCmd.Val(), tsTimestampValueSliceCmd.Err() + } + case CmdTypeStringSlice: + if stringSliceCmd, ok := cmd.(interface { + Val() []string + Err() error + }); ok { + return stringSliceCmd.Val(), stringSliceCmd.Err() + } + case CmdTypeIntSlice: + if intSliceCmd, ok := cmd.(interface { + Val() []int64 + Err() error + }); ok { + return intSliceCmd.Val(), intSliceCmd.Err() + } + case CmdTypeBoolSlice: + if boolSliceCmd, ok := cmd.(interface { + Val() []bool + Err() error + }); ok { + return boolSliceCmd.Val(), boolSliceCmd.Err() + } + case CmdTypeFloatSlice: + if floatSliceCmd, ok := cmd.(interface { + Val() []float64 + Err() error + }); ok { + return floatSliceCmd.Val(), floatSliceCmd.Err() + } + case CmdTypeSlice: + if sliceCmd, ok := cmd.(interface { + Val() []interface{} + Err() error + }); ok { + return sliceCmd.Val(), sliceCmd.Err() + } + case CmdTypeKeyValueSlice: + if keyValueSliceCmd, ok := cmd.(interface { + Val() []KeyValue + Err() error + }); ok { + return keyValueSliceCmd.Val(), keyValueSliceCmd.Err() + } + case CmdTypeMapStringString: + if mapCmd, ok := cmd.(interface { + Val() map[string]string + Err() error + }); ok { + return mapCmd.Val(), mapCmd.Err() + } + case CmdTypeMapStringInt: + if mapCmd, ok := cmd.(interface { + Val() map[string]int64 + Err() error + }); ok { + return mapCmd.Val(), mapCmd.Err() + } + case CmdTypeMapStringInterfaceSlice: + if mapCmd, ok := cmd.(interface { + Val() []map[string]interface{} + Err() error + }); ok { + return mapCmd.Val(), mapCmd.Err() + } + case CmdTypeMapStringInterface: + if mapCmd, ok := cmd.(interface { + Val() map[string]interface{} + Err() error + }); ok { + return mapCmd.Val(), mapCmd.Err() + } + case CmdTypeMapStringStringSlice: + if mapCmd, ok := cmd.(interface { + Val() []map[string]string + Err() error + }); ok { + return mapCmd.Val(), mapCmd.Err() + } + case CmdTypeMapMapStringInterface: + if mapCmd, ok := cmd.(interface { + 
Val() map[string]interface{} + Err() error + }); ok { + return mapCmd.Val(), mapCmd.Err() + } + default: + // For unknown command types, return nil + return nil, nil + } + } + + // If we can't get the command type, return nil + return nil, nil +} diff --git a/vendor/github.com/redis/go-redis/v9/command_policy_resolver.go b/vendor/github.com/redis/go-redis/v9/command_policy_resolver.go new file mode 100644 index 00000000..da8c6d31 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/command_policy_resolver.go @@ -0,0 +1,209 @@ +package redis + +import ( + "context" + "strings" + + "github.com/redis/go-redis/v9/internal/routing" +) + +type ( + module = string + commandName = string +) + +var defaultPolicies = map[module]map[commandName]*routing.CommandPolicy{ + "ft": { + "create": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "search": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "aggregate": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "dictadd": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "dictdump": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "dictdel": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "suglen": { + Request: routing.ReqDefault, + Response: routing.RespDefaultHashSlot, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "cursor": { + Request: routing.ReqSpecial, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "sugadd": { + Request: routing.ReqDefault, + Response: routing.RespDefaultHashSlot, + }, + "sugget": { + Request: routing.ReqDefault, + Response: routing.RespDefaultHashSlot, + Tips: 
map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "sugdel": { + Request: routing.ReqDefault, + Response: routing.RespDefaultHashSlot, + }, + "spellcheck": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "explain": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "explaincli": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "aliasadd": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "aliasupdate": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "aliasdel": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "info": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "tagvals": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "syndump": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "synupdate": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "profile": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + Tips: map[string]string{ + routing.ReadOnlyCMD: "", + }, + }, + "alter": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "dropindex": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + "drop": { + Request: routing.ReqDefault, + Response: routing.RespDefaultKeyless, + }, + }, +} + +type CommandInfoResolveFunc func(ctx context.Context, cmd Cmder) *routing.CommandPolicy + +type commandInfoResolver struct { + resolveFunc 
CommandInfoResolveFunc + fallBackResolver *commandInfoResolver +} + +func NewCommandInfoResolver(resolveFunc CommandInfoResolveFunc) *commandInfoResolver { + return &commandInfoResolver{ + resolveFunc: resolveFunc, + } +} + +func NewDefaultCommandPolicyResolver() *commandInfoResolver { + return NewCommandInfoResolver(func(ctx context.Context, cmd Cmder) *routing.CommandPolicy { + module := "core" + command := cmd.Name() + cmdParts := strings.Split(command, ".") + if len(cmdParts) == 2 { + module = cmdParts[0] + command = cmdParts[1] + } + + if policy, ok := defaultPolicies[module][command]; ok { + return policy + } + + return nil + }) +} + +func (r *commandInfoResolver) GetCommandPolicy(ctx context.Context, cmd Cmder) *routing.CommandPolicy { + if r.resolveFunc == nil { + return nil + } + + policy := r.resolveFunc(ctx, cmd) + if policy != nil { + return policy + } + + if r.fallBackResolver != nil { + return r.fallBackResolver.GetCommandPolicy(ctx, cmd) + } + + return nil +} + +func (r *commandInfoResolver) SetFallbackResolver(fallbackResolver *commandInfoResolver) { + r.fallBackResolver = fallbackResolver +} diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go index 546ebafb..219fe464 100644 --- a/vendor/github.com/redis/go-redis/v9/commands.go +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -55,6 +55,11 @@ func appendArgs(dst, src []interface{}) []interface{} { return appendArg(dst, src[0]) } + if cap(dst) < len(dst)+len(src) { + newDst := make([]interface{}, len(dst), len(dst)+len(src)) + copy(newDst, dst) + dst = newDst + } dst = append(dst, src...) 
return dst } @@ -81,6 +86,8 @@ func appendArg(dst []interface{}, arg interface{}) []interface{} { return dst case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: return append(dst, arg) + case nil: + return dst default: // scan struct field v := reflect.ValueOf(arg) @@ -153,6 +160,12 @@ func isEmptyValue(v reflect.Value) bool { return v.Float() == 0 case reflect.Interface, reflect.Pointer: return v.IsNil() + case reflect.Struct: + if v.Type() == reflect.TypeOf(time.Time{}) { + return v.IsZero() + } + // Only supports the struct time.Time, + // subsequent iterations will follow the func Scan support decoder. } return false } @@ -185,6 +198,7 @@ type Cmdable interface { ClientID(ctx context.Context) *IntCmd ClientUnblock(ctx context.Context, id int64) *IntCmd ClientUnblockWithError(ctx context.Context, id int64) *IntCmd + ClientMaintNotifications(ctx context.Context, enabled bool, endpointType string) *StatusCmd ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd ConfigResetStat(ctx context.Context) *StatusCmd ConfigSet(ctx context.Context, parameter, value string) *StatusCmd @@ -202,16 +216,19 @@ type Cmdable interface { ShutdownNoSave(ctx context.Context) *StatusCmd SlaveOf(ctx context.Context, host, port string) *StatusCmd SlowLogGet(ctx context.Context, num int64) *SlowLogCmd + SlowLogLen(ctx context.Context) *IntCmd + SlowLogReset(ctx context.Context) *StatusCmd Time(ctx context.Context) *TimeCmd DebugObject(ctx context.Context, key string) *StringCmd MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + Latency(ctx context.Context) *LatencyCmd + LatencyReset(ctx context.Context, events ...interface{}) *StatusCmd ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd ACLCmdable BitMapCmdable ClusterCmdable - GearsCmdable GenericCmdable GeoCmdable HashCmdable @@ -220,12 +237,14 @@ type Cmdable interface { ProbabilisticCmdable PubSubCmdable ScriptingFunctionsCmdable + SearchCmdable SetCmdable 
SortedSetCmdable StringCmdable StreamCmdable TimeseriesCmdable JSONCmdable + VectorSetCmdable } type StatefulCmdable interface { @@ -244,6 +263,7 @@ var ( _ Cmdable = (*Tx)(nil) _ Cmdable = (*Ring)(nil) _ Cmdable = (*ClusterClient)(nil) + _ Cmdable = (*Pipeline)(nil) ) type cmdable func(ctx context.Context, cmd Cmder) error @@ -309,7 +329,7 @@ func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *S var cmd *StatusCmd if info.LibName != nil { - libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, runtime.Version()) + libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version())) cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName) } else { cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer) @@ -330,7 +350,7 @@ func (info LibraryInfo) Validate() error { return nil } -// Hello Set the resp protocol used. +// Hello sets the resp protocol used. func (c statefulCmdable) Hello(ctx context.Context, ver int, username, password, clientName string, ) *MapStringInterfaceCmd { @@ -422,6 +442,15 @@ func (c cmdable) Ping(ctx context.Context) *StatusCmd { return cmd } +func (c cmdable) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Quit closes the connection. +// +// Deprecated: Just close the connection instead as of Redis 7.2.0. func (c cmdable) Quit(_ context.Context) *StatusCmd { panic("not implemented") } @@ -503,6 +532,23 @@ func (c cmdable) ClientInfo(ctx context.Context) *ClientInfoCmd { return cmd } +// ClientMaintNotifications enables or disables maintenance notifications for maintenance upgrades. +// When enabled, the client will receive push notifications about Redis maintenance events. 
+func (c cmdable) ClientMaintNotifications(ctx context.Context, enabled bool, endpointType string) *StatusCmd { + args := []interface{}{"client", "maint_notifications"} + if enabled { + if endpointType == "" { + endpointType = "none" + } + args = append(args, "on", "moving-endpoint-type", endpointType) + } else { + args = append(args, "off") + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + // ------------------------------------------------------------------------------------------------ func (c cmdable) ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd { @@ -627,6 +673,9 @@ func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd { return c.shutdown(ctx, "nosave") } +// SlaveOf sets a Redis server as a replica of another, or promotes it to being a master. +// +// Deprecated: Use ReplicaOf instead as of Redis 5.0.0. func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd { cmd := NewStatusCmd(ctx, "slaveof", host, port) _ = c(ctx, cmd) @@ -639,6 +688,34 @@ func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd { return cmd } +func (c cmdable) SlowLogLen(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "slowlog", "len") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SlowLogReset(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "slowlog", "reset") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Latency(ctx context.Context) *LatencyCmd { + cmd := NewLatencyCmd(ctx, "latency", "latest") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LatencyReset(ctx context.Context, events ...interface{}) *StatusCmd { + args := make([]interface{}, 2+len(events)) + args[0] = "latency" + args[1] = "reset" + copy(args[2:], events) + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Sync(_ context.Context) { panic("not implemented") } @@ -659,7 +736,9 @@ func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *I args := []interface{}{"memory", "usage", key} if len(samples) > 0 { if len(samples) != 1 { - panic("MemoryUsage expects single sample count") + cmd := NewIntCmd(ctx) + cmd.SetErr(errors.New("MemoryUsage expects single sample count")) + return cmd } args = append(args, "SAMPLES", samples[0]) } diff --git a/vendor/github.com/redis/go-redis/v9/docker-compose.yml b/vendor/github.com/redis/go-redis/v9/docker-compose.yml new file mode 100644 index 00000000..8299fd9d --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/docker-compose.yml @@ -0,0 +1,176 @@ +--- + +x-default-image: &default-image ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:8.6.0} + +services: + redis: + image: *default-image + platform: linux/amd64 + container_name: redis-standalone + environment: + - TLS_ENABLED=yes + - TLS_CLIENT_CNS=testcertuser + - TLS_AUTH_CLIENTS_USER=CN + - REDIS_CLUSTER=no + - PORT=6379 + - TLS_PORT=6666 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6379:6379 + - 6666:6666 # TLS port + volumes: + - "./dockers/standalone:/redis/work" + profiles: + - standalone + - sentinel + - all-stack + - all + - e2e + + osscluster: + image: *default-image + platform: linux/amd64 + container_name: redis-osscluster + environment: + - NODES=6 + - PORT=16600 + command: "--cluster-enabled yes" + ports: + - "16600-16605:16600-16605" + volumes: + - "./dockers/osscluster:/redis/work" + profiles: + - cluster + - all-stack + - all + + cae-resp-proxy: + image: redislabs/client-resp-proxy:latest + container_name: cae-resp-proxy + environment: + - TARGET_HOST=redis + - TARGET_PORT=6379 + - LISTEN_PORT=17000,17001,17002,17003 # 4 proxy nodes: initially show 3, swap in 4th during SMIGRATED + - 
LISTEN_HOST=0.0.0.0 + - API_PORT=3000 + - DEFAULT_INTERCEPTORS=cluster,hitless + ports: + - "17000:17000" # Proxy node 1 (host:container) + - "17001:17001" # Proxy node 2 (host:container) + - "17002:17002" # Proxy node 3 (host:container) + - "17003:17003" # Proxy node 4 (host:container) - hidden initially, swapped in during SMIGRATED + - "18100:3000" # HTTP API port (host:container) + depends_on: + - redis + profiles: + - e2e + - all + + proxy-fault-injector: + build: + context: . + dockerfile: maintnotifications/e2e/cmd/proxy-fi-server/Dockerfile + container_name: proxy-fault-injector + ports: + - "15000:5000" # Fault injector API port (host:container) + depends_on: + - cae-resp-proxy + environment: + - PROXY_API_URL=http://cae-resp-proxy:3000 + profiles: + - e2e + - all + + osscluster-tls: + image: *default-image + platform: linux/amd64 + container_name: redis-osscluster-tls + environment: + - NODES=6 + - PORT=6430 + - TLS_PORT=5430 + - TLS_ENABLED=yes + - TLS_CLIENT_CNS=testcertuser + - TLS_AUTH_CLIENTS_USER=CN + - REDIS_CLUSTER=yes + - REPLICAS=1 + command: "--tls-auth-clients optional --cluster-announce-ip 127.0.0.1" + ports: + - "6430-6435:6430-6435" # Regular ports + - "5430-5435:5430-5435" # TLS ports (set via TLS_PORT env var) + - "16430-16435:16430-16435" # Cluster bus ports (PORT + 10000) + volumes: + - "./dockers/osscluster-tls:/redis/work" + profiles: + - cluster-tls + - all + + sentinel-cluster: + image: *default-image + platform: linux/amd64 + container_name: redis-sentinel-cluster + network_mode: "host" + environment: + - NODES=3 + - TLS_ENABLED=yes + - TLS_CLIENT_CNS=testcertuser + - TLS_AUTH_CLIENTS_USER=CN + - REDIS_CLUSTER=no + - PORT=9121 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + #ports: + # - "9121-9123:9121-9123" + volumes: + - "./dockers/sentinel-cluster:/redis/work" + profiles: + - sentinel + - all-stack + - all + + sentinel: + image: *default-image + 
platform: linux/amd64 + container_name: redis-sentinel + depends_on: + - sentinel-cluster + environment: + - NODES=3 + - REDIS_CLUSTER=no + - PORT=26379 + command: ${REDIS_EXTRA_ARGS:---sentinel} + network_mode: "host" + #ports: + # - 26379:26379 + # - 26380:26380 + # - 26381:26381 + volumes: + - "./dockers/sentinel.conf:/redis/config-default/redis.conf" + - "./dockers/sentinel:/redis/work" + profiles: + - sentinel + - all-stack + - all + + ring-cluster: + image: *default-image + platform: linux/amd64 + container_name: redis-ring-cluster + environment: + - NODES=3 + - TLS_ENABLED=yes + - TLS_CLIENT_CNS=testcertuser + - TLS_AUTH_CLIENTS_USER=CN + - REDIS_CLUSTER=no + - PORT=6390 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6390:6390 + - 6391:6391 + - 6392:6392 + volumes: + - "./dockers/ring:/redis/work" + profiles: + - ring + - cluster + - all-stack + - all diff --git a/vendor/github.com/redis/go-redis/v9/error.go b/vendor/github.com/redis/go-redis/v9/error.go index 8a59913b..d2462a49 100644 --- a/vendor/github.com/redis/go-redis/v9/error.go +++ b/vendor/github.com/redis/go-redis/v9/error.go @@ -7,6 +7,7 @@ import ( "net" "strings" + "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" ) @@ -14,6 +15,19 @@ import ( // ErrClosed performs any operation on the closed client will return this error. var ErrClosed = pool.ErrClosed +// ErrPoolExhausted is returned from a pool connection method +// when the maximum number of database connections in the pool has been reached. +var ErrPoolExhausted = pool.ErrPoolExhausted + +// ErrPoolTimeout timed out waiting to get a connection from the connection pool. +var ErrPoolTimeout = pool.ErrPoolTimeout + +// ErrCrossSlot is returned when keys are used in the same Redis command and +// the keys are not in the same hash slot. 
This error is returned by Redis +// Cluster and will be returned by the client when TxPipeline or TxPipelined +// is used on a ClusterClient with keys in different slots. +var ErrCrossSlot = proto.RedisError("CROSSSLOT Keys in request don't hash to the same slot") + // HasErrorPrefix checks if the err is a Redis error and the message contains a prefix. func HasErrorPrefix(err error, prefix string) bool { var rErr Error @@ -37,23 +51,86 @@ type Error interface { var _ Error = proto.RedisError("") +func isContextError(err error) bool { + // Check for wrapped context errors using errors.Is + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} + +// isTimeoutError checks if an error is a timeout error, even if wrapped. +// Returns (isTimeout, shouldRetryOnTimeout) where: +// - isTimeout: true if the error is any kind of timeout error +// - shouldRetryOnTimeout: true if Timeout() method returns true +func isTimeoutError(err error) (isTimeout bool, hasTimeoutFlag bool) { + // Check for timeoutError interface (works with wrapped errors) + var te timeoutError + if errors.As(err, &te) { + return true, te.Timeout() + } + + // Check for net.Error specifically (common case for network timeouts) + var netErr net.Error + if errors.As(err, &netErr) { + return true, netErr.Timeout() + } + + return false, false +} + func shouldRetry(err error, retryTimeout bool) bool { - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return true - case nil, context.Canceled, context.DeadlineExceeded: + if err == nil { return false } - if v, ok := err.(timeoutError); ok { - if v.Timeout() { + // Check for EOF errors (works with wrapped errors) + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return true + } + + // Check for context errors (works with wrapped errors) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return false + } + + // Check for pool timeout (works with wrapped errors) + if 
errors.Is(err, pool.ErrPoolTimeout) { + // connection pool timeout, increase retries. #3289 + return true + } + + // Check for timeout errors (works with wrapped errors) + if isTimeout, hasTimeoutFlag := isTimeoutError(err); isTimeout { + if hasTimeoutFlag { return retryTimeout } return true } + // Check for typed Redis errors using errors.As (works with wrapped errors) + if proto.IsMaxClientsError(err) { + return true + } + if proto.IsLoadingError(err) { + return true + } + if proto.IsReadOnlyError(err) { + return true + } + if proto.IsMasterDownError(err) { + return true + } + if proto.IsClusterDownError(err) { + return true + } + if proto.IsTryAgainError(err) { + return true + } + if proto.IsNoReplicasError(err) { + return true + } + + // Fallback to string checking for backward compatibility with plain errors s := err.Error() - if s == "ERR max number of clients reached" { + if strings.HasPrefix(s, "ERR max number of clients reached") { return true } if strings.HasPrefix(s, "LOADING ") { @@ -68,20 +145,39 @@ func shouldRetry(err error, retryTimeout bool) bool { if strings.HasPrefix(s, "TRYAGAIN ") { return true } + if strings.HasPrefix(s, "MASTERDOWN ") { + return true + } + if strings.HasPrefix(s, "NOREPLICAS ") { + return true + } return false } func isRedisError(err error) bool { - _, ok := err.(proto.RedisError) - return ok + // Check if error implements the Error interface (works with wrapped errors) + var redisErr Error + if errors.As(err, &redisErr) { + return true + } + // Also check for proto.RedisError specifically + var protoRedisErr proto.RedisError + return errors.As(err, &protoRedisErr) } func isBadConn(err error, allowTimeout bool, addr string) bool { - switch err { - case nil: + if err == nil { return false - case context.Canceled, context.DeadlineExceeded: + } + + // Check for context errors (works with wrapped errors) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return true + } + + // Check for pool 
timeout errors (works with wrapped errors) + if errors.Is(err, pool.ErrConnUnusableTimeout) { return true } @@ -102,7 +198,9 @@ func isBadConn(err error, allowTimeout bool, addr string) bool { } if allowTimeout { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + // Check for network timeout errors (works with wrapped errors) + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { return false } } @@ -111,42 +209,151 @@ func isBadConn(err error, allowTimeout bool, addr string) bool { } func isMovedError(err error) (moved bool, ask bool, addr string) { - if !isRedisError(err) { - return + // Check for typed MovedError + if movedErr, ok := proto.IsMovedError(err); ok { + addr = movedErr.Addr() + addr = internal.GetAddr(addr) + return true, false, addr } + // Check for typed AskError + if askErr, ok := proto.IsAskError(err); ok { + addr = askErr.Addr() + addr = internal.GetAddr(addr) + return false, true, addr + } + + // Fallback to string checking for backward compatibility s := err.Error() - switch { - case strings.HasPrefix(s, "MOVED "): - moved = true - case strings.HasPrefix(s, "ASK "): - ask = true - default: - return + if strings.HasPrefix(s, "MOVED ") { + // Parse: MOVED 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + addr = internal.GetAddr(parts[2]) + return true, false, addr + } + } + if strings.HasPrefix(s, "ASK ") { + // Parse: ASK 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + addr = internal.GetAddr(parts[2]) + return false, true, addr + } } - ind := strings.LastIndex(s, " ") - if ind == -1 { - return false, false, "" - } - addr = s[ind+1:] - return + return false, false, "" } func isLoadingError(err error) bool { - return strings.HasPrefix(err.Error(), "LOADING ") + return proto.IsLoadingError(err) } func isReadOnlyError(err error) bool { - return strings.HasPrefix(err.Error(), "READONLY ") + return proto.IsReadOnlyError(err) } func isMovedSameConnAddr(err error, 
addr string) bool { - redisError := err.Error() - if !strings.HasPrefix(redisError, "MOVED ") { - return false + if movedErr, ok := proto.IsMovedError(err); ok { + return strings.HasSuffix(movedErr.Addr(), addr) } - return strings.HasSuffix(redisError, " "+addr) + return false +} + +//------------------------------------------------------------------------------ + +// Typed error checking functions for public use. +// These functions work correctly even when errors are wrapped in hooks. + +// IsLoadingError checks if an error is a Redis LOADING error, even if wrapped. +// LOADING errors occur when Redis is loading the dataset in memory. +func IsLoadingError(err error) bool { + return proto.IsLoadingError(err) +} + +// IsReadOnlyError checks if an error is a Redis READONLY error, even if wrapped. +// READONLY errors occur when trying to write to a read-only replica. +func IsReadOnlyError(err error) bool { + return proto.IsReadOnlyError(err) +} + +// IsClusterDownError checks if an error is a Redis CLUSTERDOWN error, even if wrapped. +// CLUSTERDOWN errors occur when the cluster is down. +func IsClusterDownError(err error) bool { + return proto.IsClusterDownError(err) +} + +// IsTryAgainError checks if an error is a Redis TRYAGAIN error, even if wrapped. +// TRYAGAIN errors occur when a command cannot be processed and should be retried. +func IsTryAgainError(err error) bool { + return proto.IsTryAgainError(err) +} + +// IsMasterDownError checks if an error is a Redis MASTERDOWN error, even if wrapped. +// MASTERDOWN errors occur when the master is down. +func IsMasterDownError(err error) bool { + return proto.IsMasterDownError(err) +} + +// IsMaxClientsError checks if an error is a Redis max clients error, even if wrapped. +// This error occurs when the maximum number of clients has been reached. +func IsMaxClientsError(err error) bool { + return proto.IsMaxClientsError(err) +} + +// IsMovedError checks if an error is a Redis MOVED error, even if wrapped. 
+// MOVED errors occur in cluster mode when a key has been moved to a different node. +// Returns the address of the node where the key has been moved and a boolean indicating if it's a MOVED error. +func IsMovedError(err error) (addr string, ok bool) { + if movedErr, isMovedErr := proto.IsMovedError(err); isMovedErr { + return movedErr.Addr(), true + } + return "", false +} + +// IsAskError checks if an error is a Redis ASK error, even if wrapped. +// ASK errors occur in cluster mode when a key is being migrated and the client should ask another node. +// Returns the address of the node to ask and a boolean indicating if it's an ASK error. +func IsAskError(err error) (addr string, ok bool) { + if askErr, isAskErr := proto.IsAskError(err); isAskErr { + return askErr.Addr(), true + } + return "", false +} + +// IsAuthError checks if an error is a Redis authentication error, even if wrapped. +// Authentication errors occur when: +// - NOAUTH: Redis requires authentication but none was provided +// - WRONGPASS: Redis authentication failed due to incorrect password +// - unauthenticated: Error returned when password changed +func IsAuthError(err error) bool { + return proto.IsAuthError(err) +} + +// IsPermissionError checks if an error is a Redis permission error, even if wrapped. +// Permission errors (NOPERM) occur when a user does not have permission to execute a command. +func IsPermissionError(err error) bool { + return proto.IsPermissionError(err) +} + +// IsExecAbortError checks if an error is a Redis EXECABORT error, even if wrapped. +// EXECABORT errors occur when a transaction is aborted. +func IsExecAbortError(err error) bool { + return proto.IsExecAbortError(err) +} + +// IsOOMError checks if an error is a Redis OOM (Out Of Memory) error, even if wrapped. +// OOM errors occur when Redis is out of memory. 
+func IsOOMError(err error) bool { + return proto.IsOOMError(err) +} + +// IsNoReplicasError checks if an error is a Redis NOREPLICAS error, even if wrapped. +// NOREPLICAS errors occur when not enough replicas acknowledge a write operation. +// This typically happens with WAIT/WAITAOF commands or CLUSTER SETSLOT with synchronous +// replication when the required number of replicas cannot confirm the write within the timeout. +func IsNoReplicasError(err error) bool { + return proto.IsNoReplicasError(err) } //------------------------------------------------------------------------------ diff --git a/vendor/github.com/redis/go-redis/v9/gears_commands.go b/vendor/github.com/redis/go-redis/v9/gears_commands.go deleted file mode 100644 index e0d49a6b..00000000 --- a/vendor/github.com/redis/go-redis/v9/gears_commands.go +++ /dev/null @@ -1,149 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "strings" -) - -type GearsCmdable interface { - TFunctionLoad(ctx context.Context, lib string) *StatusCmd - TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd - TFunctionDelete(ctx context.Context, libName string) *StatusCmd - TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd - TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd - TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd - TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd - TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd - TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd -} - -type TFunctionLoadOptions struct { - Replace bool - Config string -} - -type TFunctionListOptions struct { - Withcode bool - Verbose int - Library string -} - -type TFCallOptions struct { - Keys []string - Arguments []string -} - -// TFunctionLoad - load a new 
JavaScript library into Redis. -// For more information - https://redis.io/commands/tfunction-load/ -func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd { - args := []interface{}{"TFUNCTION", "LOAD", lib} - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd { - args := []interface{}{"TFUNCTION", "LOAD"} - if options != nil { - if options.Replace { - args = append(args, "REPLACE") - } - if options.Config != "" { - args = append(args, "CONFIG", options.Config) - } - } - args = append(args, lib) - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFunctionDelete - delete a JavaScript library from Redis. -// For more information - https://redis.io/commands/tfunction-delete/ -func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd { - args := []interface{}{"TFUNCTION", "DELETE", libName} - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFunctionList - list the functions with additional information about each function. -// For more information - https://redis.io/commands/tfunction-list/ -func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd { - args := []interface{}{"TFUNCTION", "LIST"} - cmd := NewMapStringInterfaceSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd { - args := []interface{}{"TFUNCTION", "LIST"} - if options != nil { - if options.Withcode { - args = append(args, "WITHCODE") - } - if options.Verbose != 0 { - v := strings.Repeat("v", options.Verbose) - args = append(args, v) - } - if options.Library != "" { - args = append(args, "LIBRARY", options.Library) - } - } - cmd := NewMapStringInterfaceSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFCall - invoke a function. 
-// For more information - https://redis.io/commands/tfcall/ -func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd { - lf := libName + "." + funcName - args := []interface{}{"TFCALL", lf, numKeys} - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd { - lf := libName + "." + funcName - args := []interface{}{"TFCALL", lf, numKeys} - if options != nil { - for _, key := range options.Keys { - args = append(args, key) - } - for _, key := range options.Arguments { - args = append(args, key) - } - } - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine). -// For more information - https://redis.io/commands/TFCallASYNC/ -func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd { - lf := fmt.Sprintf("%s.%s", libName, funcName) - args := []interface{}{"TFCALLASYNC", lf, numKeys} - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd { - lf := fmt.Sprintf("%s.%s", libName, funcName) - args := []interface{}{"TFCALLASYNC", lf, numKeys} - if options != nil { - for _, key := range options.Keys { - args = append(args, key) - } - for _, key := range options.Arguments { - args = append(args, key) - } - } - cmd := NewCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} diff --git a/vendor/github.com/redis/go-redis/v9/generic_commands.go b/vendor/github.com/redis/go-redis/v9/generic_commands.go index bf1fb47d..c7100222 100644 --- a/vendor/github.com/redis/go-redis/v9/generic_commands.go +++ b/vendor/github.com/redis/go-redis/v9/generic_commands.go @@ -3,6 +3,8 @@ package redis import ( "context" "time" + + "github.com/redis/go-redis/v9/internal/hashtag" ) type GenericCmdable interface { @@ -19,6 +21,7 @@ type GenericCmdable interface { Keys(ctx context.Context, pattern string) *StringSliceCmd Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd Move(ctx context.Context, key string, db int) *BoolCmd + ObjectFreq(ctx context.Context, key string) *IntCmd ObjectRefCount(ctx context.Context, key string) *IntCmd ObjectEncoding(ctx context.Context, key string) *StringCmd ObjectIdleTime(ctx context.Context, key string) *DurationCmd @@ -159,6 +162,12 @@ func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { return cmd } +func (c cmdable) ObjectFreq(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "object", "freq", key) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { cmd := NewIntCmd(ctx, "object", "refcount", key) _ = c(ctx, cmd) @@ -356,6 +365,9 @@ func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count in args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(3) + } _ = c(ctx, cmd) return cmd } @@ -372,6 +384,9 @@ func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, coun args = append(args, "type", keyType) } cmd := NewScanCmd(ctx, c, args...) 
+ if hashtag.Present(match) { + cmd.SetFirstKeyPos(3) + } _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/geo_commands.go b/vendor/github.com/redis/go-redis/v9/geo_commands.go index f047b98a..0f274289 100644 --- a/vendor/github.com/redis/go-redis/v9/geo_commands.go +++ b/vendor/github.com/redis/go-redis/v9/geo_commands.go @@ -33,7 +33,10 @@ func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLoca return cmd } -// GeoRadius is a read-only GEORADIUS_RO command. +// GeoRadius queries a geospatial index for members within a distance from a coordinate. +// This is a read-only variant that does not support Store or StoreDist options. +// +// Deprecated: Use GeoSearch with BYRADIUS argument instead as of Redis 6.2.0. func (c cmdable) GeoRadius( ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery, ) *GeoLocationCmd { @@ -60,7 +63,10 @@ func (c cmdable) GeoRadiusStore( return cmd } -// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command. +// GeoRadiusByMember queries a geospatial index for members within a distance from a member. +// This is a read-only variant that does not support Store or StoreDist options. +// +// Deprecated: Use GeoSearch with BYRADIUS and FROMMEMBER arguments instead as of Redis 6.2.0. 
func (c cmdable) GeoRadiusByMember( ctx context.Context, key, member string, query *GeoRadiusQuery, ) *GeoLocationCmd { diff --git a/vendor/github.com/redis/go-redis/v9/hash_commands.go b/vendor/github.com/redis/go-redis/v9/hash_commands.go index 2c62a75a..b78860a5 100644 --- a/vendor/github.com/redis/go-redis/v9/hash_commands.go +++ b/vendor/github.com/redis/go-redis/v9/hash_commands.go @@ -1,12 +1,20 @@ package redis -import "context" +import ( + "context" + "time" + + "github.com/redis/go-redis/v9/internal/hashtag" +) type HashCmdable interface { HDel(ctx context.Context, key string, fields ...string) *IntCmd HExists(ctx context.Context, key, field string) *BoolCmd HGet(ctx context.Context, key, field string) *StringCmd HGetAll(ctx context.Context, key string) *MapStringStringCmd + HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd + HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd + HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd HKeys(ctx context.Context, key string) *StringSliceCmd @@ -14,11 +22,28 @@ type HashCmdable interface { HMGet(ctx context.Context, key string, fields ...string) *SliceCmd HSet(ctx context.Context, key string, values ...interface{}) *IntCmd HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd + HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HVals(ctx context.Context, key 
string) *StringSliceCmd HRandField(ctx context.Context, key string, count int) *StringSliceCmd HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd + HStrLen(ctx context.Context, key, field string) *IntCmd + HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd + HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd + HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd + HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd + HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd + HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd + HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd + HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd + HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd } func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { @@ -91,16 +116,16 @@ func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *Slice // HSet accepts values in following formats: // -// - HSet("myhash", "key1", "value1", "key2", "value2") +// - HSet(ctx, "myhash", "key1", "value1", "key2", "value2") // -// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// - HSet(ctx, "myhash", []string{"key1", "value1", 
"key2", "value2"}) // -// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - HSet(ctx, "myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) // // Playing struct With "redis" tag. // type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` } // -// - HSet("myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0 +// - HSet(ctx, "myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0 // // For struct, can be a structure pointer type, we only parse the field whose tag is redis. // if you don't want the field to be read, you can use the `redis:"-"` flag to ignore it, @@ -169,6 +194,426 @@ func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match str args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HStrLen(ctx context.Context, key, field string) *IntCmd { + cmd := NewIntCmd(ctx, "hstrlen", key, field) + _ = c(ctx, cmd) + return cmd +} +func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + args = append(args, "novalues") + cmd := NewScanCmd(ctx, c, args...) + if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } + _ = c(ctx, cmd) + return cmd +} + +type HExpireArgs struct { + NX bool + XX bool + GT bool + LT bool +} + +// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. +// Available since Redis 7.4 CE. +// For more information refer to [HEXPIRE Documentation]. 
+// +// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/ +func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpireWithArgs - Sets the expiration time for specified fields in a hash in seconds. +// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. +// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. +// Available since Redis 7.4 CE. +// For more information refer to [HEXPIRE Documentation]. +// +// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/ +func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpire - Sets the expiration time for specified fields in a hash in milliseconds. +// Similar to HExpire, it accepts a key, an expiration duration in milliseconds, a struct with expiration condition flags, and a list of fields. 
+// The command modifies the standard time.Duration to milliseconds for the Redis command. +// Available since Redis 7.4 CE. +// For more information refer to [HPEXPIRE Documentation]. +// +// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/ +func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpireWithArgs - Sets the expiration time for specified fields in a hash in milliseconds. +// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. +// The command constructs an argument list starting with "HPEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. +// Available since Redis 7.4 CE. +// For more information refer to [HPEXPIRE Documentation]. +// +// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/ +func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// HExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in seconds. +// Takes a key, a UNIX timestamp, a struct of conditional flags, and a list of fields. +// The command sets absolute expiration times based on the UNIX timestamp provided. +// Available since Redis 7.4 CE. +// For more information refer to [HExpireAt Documentation]. +// +// [HExpireAt Documentation]: https://redis.io/commands/hexpireat/ +func (c cmdable) HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd { + + args := []interface{}{"HEXPIREAT", key, tm.Unix(), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIREAT", key, tm.Unix()} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in milliseconds. +// Similar to HExpireAt but for timestamps in milliseconds. It accepts the same parameters and adjusts the UNIX time to milliseconds. +// Available since Redis 7.4 CE. +// For more information refer to [HExpireAt Documentation]. 
+// +// [HExpireAt Documentation]: https://redis.io/commands/hexpireat/ +func (c cmdable) HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPersist - Removes the expiration time from specified fields in a hash. +// Accepts a key and the fields themselves. +// This command ensures that each field specified will have its expiration removed if present. +// Available since Redis 7.4 CE. +// For more information refer to [HPersist Documentation]. +// +// [HPersist Documentation]: https://redis.io/commands/hpersist/ +func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPERSIST", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// HExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in seconds. +// Requires a key and the fields themselves to fetch their expiration timestamps. +// This command returns the expiration times for each field or error/status codes for each field as specified. +// Available since Redis 7.4 CE. +// For more information refer to [HExpireTime Documentation]. +// +// [HExpireTime Documentation]: https://redis.io/commands/hexpiretime/ +// For more information - https://redis.io/commands/hexpiretime/ +func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRETIME", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in milliseconds. +// Similar to HExpireTime, adjusted for timestamps in milliseconds. It requires the same parameters. +// Provides the expiration timestamp for each field in milliseconds. +// Available since Redis 7.4 CE. +// For more information refer to [HExpireTime Documentation]. +// +// [HExpireTime Documentation]: https://redis.io/commands/hexpiretime/ +// For more information - https://redis.io/commands/hexpiretime/ +func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRETIME", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HTTL - Retrieves the remaining time to live for specified fields in a hash in seconds. +// Requires a key and the fields themselves. It returns the TTL for each specified field. 
+// This command fetches the TTL in seconds for each field or returns error/status codes as appropriate. +// Available since Redis 7.4 CE. +// For more information refer to [HTTL Documentation]. +// +// [HTTL Documentation]: https://redis.io/commands/httl/ +func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HTTL", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPTTL - Retrieves the remaining time to live for specified fields in a hash in milliseconds. +// Similar to HTTL, but returns the TTL in milliseconds. It requires a key and the specified fields. +// This command provides the TTL in milliseconds for each field or returns error/status codes as needed. +// Available since Redis 7.4 CE. +// For more information refer to [HPTTL Documentation]. +// +// [HPTTL Documentation]: https://redis.io/commands/hpttl/ +// For more information - https://redis.io/commands/hpttl/ +func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPTTL", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd { + args := []interface{}{"HGETDEL", key, "FIELDS", len(fields)} + for _, field := range fields { + args = append(args, field) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd { + args := []interface{}{"HGETEX", key, "FIELDS", len(fields)} + for _, field := range fields { + args = append(args, field) + } + cmd := NewStringSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// HGetEXExpirationType represents an expiration option for the HGETEX command. +type HGetEXExpirationType string + +const ( + HGetEXExpirationEX HGetEXExpirationType = "EX" + HGetEXExpirationPX HGetEXExpirationType = "PX" + HGetEXExpirationEXAT HGetEXExpirationType = "EXAT" + HGetEXExpirationPXAT HGetEXExpirationType = "PXAT" + HGetEXExpirationPERSIST HGetEXExpirationType = "PERSIST" +) + +type HGetEXOptions struct { + ExpirationType HGetEXExpirationType + ExpirationVal int64 +} + +func (c cmdable) HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd { + args := []interface{}{"HGETEX", key} + if options.ExpirationType != "" { + args = append(args, string(options.ExpirationType)) + if options.ExpirationType != HGetEXExpirationPERSIST { + args = append(args, options.ExpirationVal) + } + } + + args = append(args, "FIELDS", len(fields)) + for _, field := range fields { + args = append(args, field) + } + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type HSetEXCondition string + +const ( + HSetEXFNX HSetEXCondition = "FNX" // Only set the fields if none of them already exist. + HSetEXFXX HSetEXCondition = "FXX" // Only set the fields if all already exist. 
+) + +type HSetEXExpirationType string + +const ( + HSetEXExpirationEX HSetEXExpirationType = "EX" + HSetEXExpirationPX HSetEXExpirationType = "PX" + HSetEXExpirationEXAT HSetEXExpirationType = "EXAT" + HSetEXExpirationPXAT HSetEXExpirationType = "PXAT" + HSetEXExpirationKEEPTTL HSetEXExpirationType = "KEEPTTL" +) + +type HSetEXOptions struct { + Condition HSetEXCondition + ExpirationType HSetEXExpirationType + ExpirationVal int64 +} + +func (c cmdable) HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd { + args := []interface{}{"HSETEX", key, "FIELDS", len(fieldsAndValues) / 2} + for _, field := range fieldsAndValues { + args = append(args, field) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd { + args := []interface{}{"HSETEX", key} + if options.Condition != "" { + args = append(args, string(options.Condition)) + } + if options.ExpirationType != "" { + args = append(args, string(options.ExpirationType)) + if options.ExpirationType != HSetEXExpirationKEEPTTL { + args = append(args, options.ExpirationVal) + } + } + args = append(args, "FIELDS", len(fieldsAndValues)/2) + for _, field := range fieldsAndValues { + args = append(args, field) + } + + cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/hotkeys_commands.go b/vendor/github.com/redis/go-redis/v9/hotkeys_commands.go new file mode 100644 index 00000000..024db3ff --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/hotkeys_commands.go @@ -0,0 +1,122 @@ +package redis + +import ( + "context" + "errors" + "strings" +) + +// HOTKEYS commands are only available on standalone *Client instances. 
+// They are NOT available on ClusterClient, Ring, or UniversalClient because +// HOTKEYS is a stateful command requiring session affinity - all operations +// (START, GET, STOP, RESET) must be sent to the same Redis node. +// +// If you are using UniversalClient and need HOTKEYS functionality, you must +// type assert to *Client first: +// +// if client, ok := universalClient.(*redis.Client); ok { +// result, err := client.HotKeysStart(ctx, args) +// // ... +// } + +// HotKeysMetric represents the metrics that can be tracked by the HOTKEYS command. +type HotKeysMetric string + +const ( + // HotKeysMetricCPU tracks CPU time spent on the key (in microseconds). + HotKeysMetricCPU HotKeysMetric = "CPU" + // HotKeysMetricNET tracks network bytes used by the key (ingress + egress + replication). + HotKeysMetricNET HotKeysMetric = "NET" +) + +// HotKeysStartArgs contains the arguments for the HOTKEYS START command. +// This command is only available on standalone clients due to its stateful nature +// requiring session affinity. It must NOT be used on cluster or pooled clients. +type HotKeysStartArgs struct { + // Metrics to track. At least one must be specified. + Metrics []HotKeysMetric + // Count is the number of top keys to report. + // Default: 10, Min: 10, Max: 64 + Count uint8 + // Duration is the auto-stop tracking after this many seconds. + // Default: 0 (no auto-stop) + Duration int64 + // Sample is the sample ratio - track keys with probability 1/sample. + // Default: 1 (track every key), Min: 1 + Sample int64 + // Slots specifies specific hash slots to track (0-16383). + // All specified slots must be hosted by the receiving node. + // If not specified, all slots are tracked. + Slots []uint16 +} + +// ErrHotKeysNoMetrics is returned when HotKeysStart is called without any metrics specified. +var ErrHotKeysNoMetrics = errors.New("redis: at least one metric must be specified for HOTKEYS START") + +// HotKeysStart starts collecting hotkeys data. 
+// At least one metric must be specified in args.Metrics. +// This command is only available on standalone clients. +func (c *Client) HotKeysStart(ctx context.Context, args *HotKeysStartArgs) *StatusCmd { + cmdArgs := make([]interface{}, 0, 16) + cmdArgs = append(cmdArgs, "hotkeys", "start") + + // Validate that at least one metric is specified + if len(args.Metrics) == 0 { + cmd := NewStatusCmd(ctx, cmdArgs...) + cmd.SetErr(ErrHotKeysNoMetrics) + return cmd + } + + cmdArgs = append(cmdArgs, "metrics", len(args.Metrics)) + for _, metric := range args.Metrics { + cmdArgs = append(cmdArgs, strings.ToLower(string(metric))) + } + + if args.Count > 0 { + cmdArgs = append(cmdArgs, "count", args.Count) + } + + if args.Duration > 0 { + cmdArgs = append(cmdArgs, "duration", args.Duration) + } + + if args.Sample > 0 { + cmdArgs = append(cmdArgs, "sample", args.Sample) + } + + if len(args.Slots) > 0 { + cmdArgs = append(cmdArgs, "slots", len(args.Slots)) + for _, slot := range args.Slots { + cmdArgs = append(cmdArgs, slot) + } + } + + cmd := NewStatusCmd(ctx, cmdArgs...) + _ = c.Process(ctx, cmd) + return cmd +} + +// HotKeysStop stops the ongoing hotkeys collection session. +// This command is only available on standalone clients. +func (c *Client) HotKeysStop(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "hotkeys", "stop") + _ = c.Process(ctx, cmd) + return cmd +} + +// HotKeysReset discards the last hotkeys collection session results. +// Returns an error if tracking is currently active. +// This command is only available on standalone clients. +func (c *Client) HotKeysReset(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "hotkeys", "reset") + _ = c.Process(ctx, cmd) + return cmd +} + +// HotKeysGet retrieves the results of the ongoing or last hotkeys collection session. +// This command is only available on standalone clients. 
+func (c *Client) HotKeysGet(ctx context.Context) *HotKeysCmd { + cmd := NewHotKeysCmd(ctx, "hotkeys", "get") + _ = c.Process(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go new file mode 100644 index 00000000..22bfedf7 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/conn_reauth_credentials_listener.go @@ -0,0 +1,100 @@ +package streaming + +import ( + "github.com/redis/go-redis/v9/auth" + "github.com/redis/go-redis/v9/internal/pool" +) + +// ConnReAuthCredentialsListener is a credentials listener for a specific connection +// that triggers re-authentication when credentials change. +// +// This listener implements the auth.CredentialsListener interface and is subscribed +// to a StreamingCredentialsProvider. When new credentials are received via OnNext, +// it marks the connection for re-authentication through the manager. +// +// The re-authentication is always performed asynchronously to avoid blocking the +// credentials provider and to prevent potential deadlocks with the pool semaphore. +// The actual re-auth happens when the connection is returned to the pool in an idle state. 
+// +// Lifecycle: +// - Created during connection initialization via Manager.Listener() +// - Subscribed to the StreamingCredentialsProvider +// - Receives credential updates via OnNext() +// - Cleaned up when connection is removed from pool via Manager.RemoveListener() +type ConnReAuthCredentialsListener struct { + // reAuth is the function to re-authenticate the connection with new credentials + reAuth func(conn *pool.Conn, credentials auth.Credentials) error + + // onErr is the function to call when re-authentication or acquisition fails + onErr func(conn *pool.Conn, err error) + + // conn is the connection this listener is associated with + conn *pool.Conn + + // manager is the streaming credentials manager for coordinating re-auth + manager *Manager +} + +// OnNext is called when new credentials are received from the StreamingCredentialsProvider. +// +// This method marks the connection for asynchronous re-authentication. The actual +// re-authentication happens in the background when the connection is returned to the +// pool and is in an idle state. +// +// Asynchronous re-auth is used to: +// - Avoid blocking the credentials provider's notification goroutine +// - Prevent deadlocks with the pool's semaphore (especially with small pool sizes) +// - Ensure re-auth happens when the connection is safe to use (not processing commands) +// +// The reAuthFn callback receives: +// - nil if the connection was successfully acquired for re-auth +// - error if acquisition timed out or failed +// +// Thread-safe: Called by the credentials provider's notification goroutine. 
+func (c *ConnReAuthCredentialsListener) OnNext(credentials auth.Credentials) { + if c.conn == nil || c.conn.IsClosed() || c.manager == nil || c.reAuth == nil { + return + } + + // Always use async reauth to avoid complex pool semaphore issues + // The synchronous path can cause deadlocks in the pool's semaphore mechanism + // when called from the Subscribe goroutine, especially with small pool sizes. + // The connection pool hook will re-authenticate the connection when it is + // returned to the pool in a clean, idle state. + c.manager.MarkForReAuth(c.conn, func(err error) { + // err is from connection acquisition (timeout, etc.) + if err != nil { + // Log the error + c.OnError(err) + return + } + // err is from reauth command execution + err = c.reAuth(c.conn, credentials) + if err != nil { + // Log the error + c.OnError(err) + return + } + }) +} + +// OnError is called when an error occurs during credential streaming or re-authentication. +// +// This method can be called from: +// - The StreamingCredentialsProvider when there's an error in the credentials stream +// - The re-auth process when connection acquisition times out +// - The re-auth process when the AUTH command fails +// +// The error is delegated to the onErr callback provided during listener creation. +// +// Thread-safe: Can be called from multiple goroutines (provider, re-auth worker). +func (c *ConnReAuthCredentialsListener) OnError(err error) { + if c.onErr == nil { + return + } + + c.onErr(c.conn, err) +} + +// Ensure ConnReAuthCredentialsListener implements the CredentialsListener interface. 
+var _ auth.CredentialsListener = (*ConnReAuthCredentialsListener)(nil) diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go new file mode 100644 index 00000000..66e6eafd --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/cred_listeners.go @@ -0,0 +1,77 @@ +package streaming + +import ( + "sync" + + "github.com/redis/go-redis/v9/auth" +) + +// CredentialsListeners is a thread-safe collection of credentials listeners +// indexed by connection ID. +// +// This collection is used by the Manager to maintain a registry of listeners +// for each connection in the pool. Listeners are reused when connections are +// reinitialized (e.g., after a handoff) to avoid creating duplicate subscriptions +// to the StreamingCredentialsProvider. +// +// The collection supports concurrent access from multiple goroutines during +// connection initialization, credential updates, and connection removal. +type CredentialsListeners struct { + // listeners maps connection ID to credentials listener + listeners map[uint64]auth.CredentialsListener + + // lock protects concurrent access to the listeners map + lock sync.RWMutex +} + +// NewCredentialsListeners creates a new thread-safe credentials listeners collection. +func NewCredentialsListeners() *CredentialsListeners { + return &CredentialsListeners{ + listeners: make(map[uint64]auth.CredentialsListener), + } +} + +// Add adds or updates a credentials listener for a connection. +// +// If a listener already exists for the connection ID, it is replaced. +// This is safe because the old listener should have been unsubscribed +// before the connection was reinitialized. +// +// Thread-safe: Can be called concurrently from multiple goroutines. 
+func (c *CredentialsListeners) Add(connID uint64, listener auth.CredentialsListener) { + c.lock.Lock() + defer c.lock.Unlock() + if c.listeners == nil { + c.listeners = make(map[uint64]auth.CredentialsListener) + } + c.listeners[connID] = listener +} + +// Get retrieves the credentials listener for a connection. +// +// Returns: +// - listener: The credentials listener for the connection, or nil if not found +// - ok: true if a listener exists for the connection ID, false otherwise +// +// Thread-safe: Can be called concurrently from multiple goroutines. +func (c *CredentialsListeners) Get(connID uint64) (auth.CredentialsListener, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if len(c.listeners) == 0 { + return nil, false + } + listener, ok := c.listeners[connID] + return listener, ok +} + +// Remove removes the credentials listener for a connection. +// +// This is called when a connection is removed from the pool to prevent +// memory leaks. If no listener exists for the connection ID, this is a no-op. +// +// Thread-safe: Can be called concurrently from multiple goroutines. +func (c *CredentialsListeners) Remove(connID uint64) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.listeners, connID) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go new file mode 100644 index 00000000..f785927e --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/manager.go @@ -0,0 +1,137 @@ +package streaming + +import ( + "errors" + "time" + + "github.com/redis/go-redis/v9/auth" + "github.com/redis/go-redis/v9/internal/pool" +) + +// Manager coordinates streaming credentials and re-authentication for a connection pool. 
+// +// The manager is responsible for: +// - Creating and managing per-connection credentials listeners +// - Providing the pool hook for re-authentication +// - Coordinating between credentials updates and pool operations +// +// When credentials change via a StreamingCredentialsProvider: +// 1. The credentials listener (ConnReAuthCredentialsListener) receives the update +// 2. It calls MarkForReAuth on the manager +// 3. The manager delegates to the pool hook +// 4. The pool hook schedules background re-authentication +// +// The manager maintains a registry of credentials listeners indexed by connection ID, +// allowing listener reuse when connections are reinitialized (e.g., after handoff). +type Manager struct { + // credentialsListeners maps connection ID to credentials listener + credentialsListeners *CredentialsListeners + + // pool is the connection pool being managed + pool pool.Pooler + + // poolHookRef is the re-authentication pool hook + poolHookRef *ReAuthPoolHook +} + +// NewManager creates a new streaming credentials manager. +// +// Parameters: +// - pl: The connection pool to manage +// - reAuthTimeout: Maximum time to wait for acquiring a connection for re-authentication +// +// The manager creates a ReAuthPoolHook sized to match the pool size, ensuring that +// re-auth operations don't exhaust the connection pool. +func NewManager(pl pool.Pooler, reAuthTimeout time.Duration) *Manager { + m := &Manager{ + pool: pl, + poolHookRef: NewReAuthPoolHook(pl.Size(), reAuthTimeout), + credentialsListeners: NewCredentialsListeners(), + } + m.poolHookRef.manager = m + return m +} + +// PoolHook returns the pool hook for re-authentication. +// +// This hook should be registered with the connection pool to enable +// automatic re-authentication when credentials change. +func (m *Manager) PoolHook() pool.PoolHook { + return m.poolHookRef +} + +// Listener returns or creates a credentials listener for a connection. 
+// +// This method is called during connection initialization to set up the +// credentials listener. If a listener already exists for the connection ID +// (e.g., after a handoff), it is reused. +// +// Parameters: +// - poolCn: The connection to create/get a listener for +// - reAuth: Function to re-authenticate the connection with new credentials +// - onErr: Function to call when re-authentication fails +// +// Returns: +// - auth.CredentialsListener: The listener to subscribe to the credentials provider +// - error: Non-nil if poolCn is nil +// +// Note: The reAuth and onErr callbacks are captured once when the listener is +// created and reused for the connection's lifetime. They should not change. +// +// Thread-safe: Can be called concurrently during connection initialization. +func (m *Manager) Listener( + poolCn *pool.Conn, + reAuth func(*pool.Conn, auth.Credentials) error, + onErr func(*pool.Conn, error), +) (auth.CredentialsListener, error) { + if poolCn == nil { + return nil, errors.New("poolCn cannot be nil") + } + connID := poolCn.GetID() + // if we reconnect the underlying network connection, the streaming credentials listener will continue to work + // so we can get the old listener from the cache and use it. + // subscribing the same (an already subscribed) listener for a StreamingCredentialsProvider SHOULD be a no-op + listener, ok := m.credentialsListeners.Get(connID) + if !ok || listener == nil { + // Create new listener for this connection + // Note: Callbacks (reAuth, onErr) are captured once and reused for the connection's lifetime + newCredListener := &ConnReAuthCredentialsListener{ + conn: poolCn, + reAuth: reAuth, + onErr: onErr, + manager: m, + } + + m.credentialsListeners.Add(connID, newCredListener) + listener = newCredListener + } + return listener, nil +} + +// MarkForReAuth marks a connection for re-authentication. +// +// This method is called by the credentials listener when new credentials are +// received. 
It delegates to the pool hook to schedule background re-authentication. +// +// Parameters: +// - poolCn: The connection to re-authenticate +// - reAuthFn: Function to call for re-authentication, receives error if acquisition fails +// +// Thread-safe: Called by credentials listeners when credentials change. +func (m *Manager) MarkForReAuth(poolCn *pool.Conn, reAuthFn func(error)) { + connID := poolCn.GetID() + m.poolHookRef.MarkForReAuth(connID, reAuthFn) +} + +// RemoveListener removes the credentials listener for a connection. +// +// This method is called by the pool hook's OnRemove to clean up listeners +// when connections are removed from the pool. +// +// Parameters: +// - connID: The connection ID whose listener should be removed +// +// Thread-safe: Called during connection removal. +func (m *Manager) RemoveListener(connID uint64) { + m.credentialsListeners.Remove(connID) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go new file mode 100644 index 00000000..aaf4f609 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/auth/streaming/pool_hook.go @@ -0,0 +1,241 @@ +package streaming + +import ( + "context" + "sync" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/pool" +) + +// ReAuthPoolHook is a pool hook that manages background re-authentication of connections +// when credentials change via a streaming credentials provider. +// +// The hook uses a semaphore-based worker pool to limit concurrent re-authentication +// operations and prevent pool exhaustion. When credentials change, connections are +// marked for re-authentication and processed asynchronously in the background. +// +// The re-authentication process: +// 1. OnPut: When a connection is returned to the pool, check if it needs re-auth +// 2. 
If yes, schedule it for background processing (move from shouldReAuth to scheduledReAuth) +// 3. A worker goroutine acquires the connection (waits until it's not in use) +// 4. Executes the re-auth function while holding the connection +// 5. Releases the connection back to the pool +// +// The hook ensures that: +// - Only one re-auth operation runs per connection at a time +// - Connections are not used for commands during re-authentication +// - Re-auth operations timeout if they can't acquire the connection +// - Resources are properly cleaned up on connection removal +type ReAuthPoolHook struct { + // shouldReAuth maps connection ID to re-auth function + // Connections in this map need re-authentication but haven't been scheduled yet + shouldReAuth map[uint64]func(error) + shouldReAuthLock sync.RWMutex + + // workers is a semaphore limiting concurrent re-auth operations + // Initialized with poolSize tokens to prevent pool exhaustion + // Uses FastSemaphore for better performance with eventual fairness + workers *internal.FastSemaphore + + // reAuthTimeout is the maximum time to wait for acquiring a connection for re-auth + reAuthTimeout time.Duration + + // scheduledReAuth maps connection ID to scheduled status + // Connections in this map have a background worker attempting re-authentication + scheduledReAuth map[uint64]bool + scheduledLock sync.RWMutex + + // manager is a back-reference for cleanup operations + manager *Manager +} + +// NewReAuthPoolHook creates a new re-authentication pool hook. +// +// Parameters: +// - poolSize: Maximum number of concurrent re-auth operations (typically matches pool size) +// - reAuthTimeout: Maximum time to wait for acquiring a connection for re-authentication +// +// The poolSize parameter is used to initialize the worker semaphore, ensuring that +// re-auth operations don't exhaust the connection pool. 
+func NewReAuthPoolHook(poolSize int, reAuthTimeout time.Duration) *ReAuthPoolHook { + return &ReAuthPoolHook{ + shouldReAuth: make(map[uint64]func(error)), + scheduledReAuth: make(map[uint64]bool), + workers: internal.NewFastSemaphore(int32(poolSize)), + reAuthTimeout: reAuthTimeout, + } +} + +// MarkForReAuth marks a connection for re-authentication. +// +// This method is called when credentials change and a connection needs to be +// re-authenticated. The actual re-authentication happens asynchronously when +// the connection is returned to the pool (in OnPut). +// +// Parameters: +// - connID: The connection ID to mark for re-authentication +// - reAuthFn: Function to call for re-authentication, receives error if acquisition fails +// +// Thread-safe: Can be called concurrently from multiple goroutines. +func (r *ReAuthPoolHook) MarkForReAuth(connID uint64, reAuthFn func(error)) { + r.shouldReAuthLock.Lock() + defer r.shouldReAuthLock.Unlock() + r.shouldReAuth[connID] = reAuthFn +} + +// OnGet is called when a connection is retrieved from the pool. +// +// This hook checks if the connection needs re-authentication or has a scheduled +// re-auth operation. If so, it rejects the connection (returns accept=false), +// causing the pool to try another connection. +// +// Returns: +// - accept: false if connection needs re-auth, true otherwise +// - err: always nil (errors are not used in this hook) +// +// Thread-safe: Called concurrently by multiple goroutines getting connections. 
+func (r *ReAuthPoolHook) OnGet(_ context.Context, conn *pool.Conn, _ bool) (accept bool, err error) { + connID := conn.GetID() + r.shouldReAuthLock.RLock() + _, shouldReAuth := r.shouldReAuth[connID] + r.shouldReAuthLock.RUnlock() + // This connection was marked for reauth while in the pool, + // reject the connection + if shouldReAuth { + // simply reject the connection, it will be re-authenticated in OnPut + return false, nil + } + r.scheduledLock.RLock() + _, hasScheduled := r.scheduledReAuth[connID] + r.scheduledLock.RUnlock() + // has scheduled reauth, reject the connection + if hasScheduled { + // simply reject the connection, it currently has a reauth scheduled + // and the worker is waiting for slot to execute the reauth + return false, nil + } + return true, nil +} + +// OnPut is called when a connection is returned to the pool. +// +// This hook checks if the connection needs re-authentication. If so, it schedules +// a background goroutine to perform the re-auth asynchronously. The goroutine: +// 1. Waits for a worker slot (semaphore) +// 2. Acquires the connection (waits until not in use) +// 3. Executes the re-auth function +// 4. Releases the connection and worker slot +// +// The connection is always pooled (not removed) since re-auth happens in background. +// +// Returns: +// - shouldPool: always true (connection stays in pool during background re-auth) +// - shouldRemove: always false +// - err: always nil +// +// Thread-safe: Called concurrently by multiple goroutines returning connections. 
+func (r *ReAuthPoolHook) OnPut(_ context.Context, conn *pool.Conn) (bool, bool, error) { + if conn == nil { + // noop + return true, false, nil + } + connID := conn.GetID() + // Check if reauth is needed and get the function with proper locking + r.shouldReAuthLock.RLock() + reAuthFn, ok := r.shouldReAuth[connID] + r.shouldReAuthLock.RUnlock() + + if ok { + // Acquire both locks to atomically move from shouldReAuth to scheduledReAuth + // This prevents race conditions where OnGet might miss the transition + r.shouldReAuthLock.Lock() + r.scheduledLock.Lock() + r.scheduledReAuth[connID] = true + delete(r.shouldReAuth, connID) + r.scheduledLock.Unlock() + r.shouldReAuthLock.Unlock() + go func() { + r.workers.AcquireBlocking() + // safety first + if conn == nil || (conn != nil && conn.IsClosed()) { + r.workers.Release() + return + } + defer func() { + if rec := recover(); rec != nil { + // once again - safety first + internal.Logger.Printf(context.Background(), "panic in reauth worker: %v", rec) + } + r.scheduledLock.Lock() + delete(r.scheduledReAuth, connID) + r.scheduledLock.Unlock() + r.workers.Release() + }() + + // Create timeout context for connection acquisition + // This prevents indefinite waiting if the connection is stuck + ctx, cancel := context.WithTimeout(context.Background(), r.reAuthTimeout) + defer cancel() + + // Try to acquire the connection for re-authentication + // We need to ensure the connection is IDLE (not IN_USE) before transitioning to UNUSABLE + // This prevents re-authentication from interfering with active commands + // Use AwaitAndTransition to wait for the connection to become IDLE + stateMachine := conn.GetStateMachine() + if stateMachine == nil { + // No state machine - should not happen, but handle gracefully + reAuthFn(pool.ErrConnUnusableTimeout) + return + } + + // Use predefined slice to avoid allocation + _, err := stateMachine.AwaitAndTransition(ctx, pool.ValidFromIdle(), pool.StateUnusable) + if err != nil { + // Timeout or 
other error occurred, cannot acquire connection + reAuthFn(err) + return + } + + // safety first + if !conn.IsClosed() { + // Successfully acquired the connection, perform reauth + reAuthFn(nil) + } + + // Release the connection: transition from UNUSABLE back to IDLE + stateMachine.Transition(pool.StateIdle) + }() + } + + // the reauth will happen in background, as far as the pool is concerned: + // pool the connection, don't remove it, no error + return true, false, nil +} + +// OnRemove is called when a connection is removed from the pool. +// +// This hook cleans up all state associated with the connection: +// - Removes from shouldReAuth map (pending re-auth) +// - Removes from scheduledReAuth map (active re-auth) +// - Removes credentials listener from manager +// +// This prevents memory leaks and ensures that removed connections don't have +// lingering re-auth operations or listeners. +// +// Thread-safe: Called when connections are removed due to errors, timeouts, or pool closure. 
+func (r *ReAuthPoolHook) OnRemove(_ context.Context, conn *pool.Conn, _ error) { + connID := conn.GetID() + r.shouldReAuthLock.Lock() + r.scheduledLock.Lock() + delete(r.scheduledReAuth, connID) + delete(r.shouldReAuth, connID) + r.scheduledLock.Unlock() + r.shouldReAuthLock.Unlock() + if r.manager != nil { + r.manager.RemoveListener(connID) + } +} + +var _ pool.PoolHook = (*ReAuthPoolHook)(nil) diff --git a/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go b/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go index f13ee816..ea56fd6c 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go +++ b/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go @@ -56,6 +56,18 @@ func Key(key string) string { return key } +func Present(key string) bool { + if key == "" { + return false + } + if s := strings.IndexByte(key, '{'); s > -1 { + if e := strings.IndexByte(key[s+1:], '}'); e > 0 { + return true + } + } + return false +} + func RandomSlot() int { return rand.Intn(slotNumber) } diff --git a/vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go b/vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go new file mode 100644 index 00000000..8f856971 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/interfaces/interfaces.go @@ -0,0 +1,59 @@ +// Package interfaces provides shared interfaces used by both the main redis package +// and the maintnotifications upgrade package to avoid circular dependencies. 
+package interfaces + +import ( + "context" + "net" + "time" +) + +// NotificationProcessor is (most probably) a push.NotificationProcessor +// forward declaration to avoid circular imports +type NotificationProcessor interface { + RegisterHandler(pushNotificationName string, handler interface{}, protected bool) error + UnregisterHandler(pushNotificationName string) error + GetHandler(pushNotificationName string) interface{} +} + +// ClientInterface defines the interface that clients must implement for maintnotifications upgrades. +type ClientInterface interface { + // GetOptions returns the client options. + GetOptions() OptionsInterface + + // GetPushProcessor returns the client's push notification processor. + GetPushProcessor() NotificationProcessor +} + +// OptionsInterface defines the interface for client options. +// Uses an adapter pattern to avoid circular dependencies. +type OptionsInterface interface { + // GetReadTimeout returns the read timeout. + GetReadTimeout() time.Duration + + // GetWriteTimeout returns the write timeout. + GetWriteTimeout() time.Duration + + // GetNetwork returns the network type. + GetNetwork() string + + // GetAddr returns the connection address. + GetAddr() string + + // GetNodeAddress returns the address of the Redis node as reported by the server. + // For cluster clients, this is the endpoint from CLUSTER SLOTS before any transformation. + // For standalone clients, this defaults to Addr. + GetNodeAddress() string + + // IsTLSEnabled returns true if TLS is enabled. + IsTLSEnabled() bool + + // GetProtocol returns the protocol version. + GetProtocol() int + + // GetPoolSize returns the connection pool size. + GetPoolSize() int + + // NewDialer returns a new dialer function for the connection. 
+ NewDialer() func(context.Context) (net.Conn, error) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/log.go b/vendor/github.com/redis/go-redis/v9/internal/log.go index c8b9213d..0bfffc31 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/log.go +++ b/vendor/github.com/redis/go-redis/v9/internal/log.go @@ -7,20 +7,73 @@ import ( "os" ) +// TODO (ned): Revisit logging +// Add more standardized approach with log levels and configurability + type Logging interface { Printf(ctx context.Context, format string, v ...interface{}) } -type logger struct { +type DefaultLogger struct { log *log.Logger } -func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) { +func (l *DefaultLogger) Printf(ctx context.Context, format string, v ...interface{}) { _ = l.log.Output(2, fmt.Sprintf(format, v...)) } +func NewDefaultLogger() Logging { + return &DefaultLogger{ + log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile), + } +} + // Logger calls Output to print to the stderr. // Arguments are handled in the manner of fmt.Print. 
-var Logger Logging = &logger{ - log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile), +var Logger Logging = NewDefaultLogger() + +var LogLevel LogLevelT = LogLevelError + +// LogLevelT represents the logging level +type LogLevelT int + +// Log level constants for the entire go-redis library +const ( + LogLevelError LogLevelT = iota // 0 - errors only + LogLevelWarn // 1 - warnings and errors + LogLevelInfo // 2 - info, warnings, and errors + LogLevelDebug // 3 - debug, info, warnings, and errors +) + +// String returns the string representation of the log level +func (l LogLevelT) String() string { + switch l { + case LogLevelError: + return "ERROR" + case LogLevelWarn: + return "WARN" + case LogLevelInfo: + return "INFO" + case LogLevelDebug: + return "DEBUG" + default: + return "UNKNOWN" + } +} + +// IsValid returns true if the log level is valid +func (l LogLevelT) IsValid() bool { + return l >= LogLevelError && l <= LogLevelDebug +} + +func (l LogLevelT) WarnOrAbove() bool { + return l >= LogLevelWarn +} + +func (l LogLevelT) InfoOrAbove() bool { + return l >= LogLevelInfo +} + +func (l LogLevelT) DebugOrAbove() bool { + return l >= LogLevelDebug } diff --git a/vendor/github.com/redis/go-redis/v9/internal/otel/metrics.go b/vendor/github.com/redis/go-redis/v9/internal/otel/metrics.go new file mode 100644 index 00000000..a4840825 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/otel/metrics.go @@ -0,0 +1,279 @@ +package otel + +import ( + "context" + "crypto/rand" + "encoding/hex" + "sync" + "time" + + "github.com/redis/go-redis/v9/internal/pool" +) + +// generateUniqueID generates a short unique identifier for pool names. +func generateUniqueID() string { + b := make([]byte, 4) + if _, err := rand.Read(b); err != nil { + return "" + } + return hex.EncodeToString(b) +} + +// Cmder is a minimal interface for command information needed for metrics. +// This avoids circular dependencies with the main redis package. 
+type Cmder interface { + Name() string + FullName() string + Args() []interface{} + Err() error +} + +// Recorder is the interface for recording metrics. +type Recorder interface { + // RecordOperationDuration records the total operation duration (including all retries) + // dbIndex is the Redis database index (0-15) + RecordOperationDuration(ctx context.Context, duration time.Duration, cmd Cmder, attempts int, err error, cn *pool.Conn, dbIndex int) + + // RecordPipelineOperationDuration records the total pipeline/transaction duration. + // operationName should be "PIPELINE" for regular pipelines or "MULTI" for transactions. + // cmdCount is the number of commands in the pipeline. + // err is the error from the pipeline execution (can be nil). + // dbIndex is the Redis database index (0-15) + RecordPipelineOperationDuration(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn *pool.Conn, dbIndex int) + + // RecordConnectionCreateTime records the time it took to create a new connection + RecordConnectionCreateTime(ctx context.Context, duration time.Duration, cn *pool.Conn) + + // RecordConnectionRelaxedTimeout records when connection timeout is relaxed/unrelaxed + // delta: +1 for relaxed, -1 for unrelaxed + // poolName: name of the connection pool (e.g., "main", "pubsub") + // notificationType: the notification type that triggered the timeout relaxation (e.g., "MOVING") + RecordConnectionRelaxedTimeout(ctx context.Context, delta int, cn *pool.Conn, poolName, notificationType string) + + // RecordConnectionHandoff records when a connection is handed off to another node + // poolName: name of the connection pool (e.g., "main", "pubsub") + RecordConnectionHandoff(ctx context.Context, cn *pool.Conn, poolName string) + + // RecordError records client errors (ASK, MOVED, handshake failures, etc.) 
+ // errorType: type of error (e.g., "ASK", "MOVED", "HANDSHAKE_FAILED") + // statusCode: Redis response status code if available (e.g., "MOVED", "ASK") + // isInternal: whether this is an internal error + // retryAttempts: number of retry attempts made + RecordError(ctx context.Context, errorType string, cn *pool.Conn, statusCode string, isInternal bool, retryAttempts int) + + // RecordMaintenanceNotification records when a maintenance notification is received + // notificationType: the type of notification (e.g., "MOVING", "MIGRATING", etc.) + RecordMaintenanceNotification(ctx context.Context, cn *pool.Conn, notificationType string) + + // RecordConnectionWaitTime records the time spent waiting for a connection from the pool + RecordConnectionWaitTime(ctx context.Context, duration time.Duration, cn *pool.Conn) + + // RecordConnectionClosed records when a connection is closed + // reason: reason for closing (e.g., "idle", "max_lifetime", "error", "pool_closed") + // err: the error that caused the close (nil for non-error closures) + RecordConnectionClosed(ctx context.Context, cn *pool.Conn, reason string, err error) + + // RecordPubSubMessage records a Pub/Sub message + // direction: "sent" or "received" + // channel: channel name (may be hidden for cardinality reduction) + // sharded: true for sharded pub/sub (SPUBLISH/SSUBSCRIBE) + RecordPubSubMessage(ctx context.Context, cn *pool.Conn, direction, channel string, sharded bool) + + // RecordStreamLag records the lag for stream consumer group processing + // lag: time difference between message creation and consumption + // streamName: name of the stream (may be hidden for cardinality reduction) + // consumerGroup: name of the consumer group + // consumerName: name of the consumer + RecordStreamLag(ctx context.Context, lag time.Duration, cn *pool.Conn, streamName, consumerGroup, consumerName string) +} + +type PubSubPooler interface { + Stats() *pool.PubSubStats +} + +type PoolRegistrar interface { + // 
RegisterPool is called when a new client is created with its connection pools. + // poolName: identifier for the pool (e.g., "main_abc123") + // pool: the connection pool + RegisterPool(poolName string, pool pool.Pooler) + // UnregisterPool is called when a client is closed to remove its pool from the registry. + // pool: the connection pool to unregister + UnregisterPool(pool pool.Pooler) + // RegisterPubSubPool is called when a new client is created with a PubSub pool. + // poolName: identifier for the pool (e.g., "main_abc123_pubsub") + // pool: the PubSub connection pool + RegisterPubSubPool(poolName string, pool PubSubPooler) + // UnregisterPubSubPool is called when a PubSub client is closed to remove its pool. + // pool: the PubSub connection pool to unregister + UnregisterPubSubPool(pool PubSubPooler) +} + +var ( + // recorderMu protects globalRecorder and operation duration callbacks + recorderMu sync.RWMutex + + // Global recorder instance (initialized by extra/redisotel-native) + globalRecorder Recorder = noopRecorder{} + + // Callbacks for operation duration metrics + operationDurationCallback func(ctx context.Context, duration time.Duration, cmd Cmder, attempts int, err error, cn *pool.Conn, dbIndex int) + pipelineOperationDurationCallback func(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn *pool.Conn, dbIndex int) +) + +// GetOperationDurationCallback returns the callback for operation duration. +func GetOperationDurationCallback() func(ctx context.Context, duration time.Duration, cmd Cmder, attempts int, err error, cn *pool.Conn, dbIndex int) { + recorderMu.RLock() + cb := operationDurationCallback + recorderMu.RUnlock() + return cb +} + +// GetPipelineOperationDurationCallback returns the callback for pipeline operation duration. 
+func GetPipelineOperationDurationCallback() func(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn *pool.Conn, dbIndex int) { + recorderMu.RLock() + cb := pipelineOperationDurationCallback + recorderMu.RUnlock() + return cb +} + +// getRecorder returns the current global recorder under a read lock. +func getRecorder() Recorder { + recorderMu.RLock() + r := globalRecorder + recorderMu.RUnlock() + return r +} + +// SetGlobalRecorder sets the global recorder (called by Init() in extra/redisotel-native) +func SetGlobalRecorder(r Recorder) { + recorderMu.Lock() + if r == nil { + globalRecorder = noopRecorder{} + operationDurationCallback = nil + pipelineOperationDurationCallback = nil + recorderMu.Unlock() + // Unregister all pool metric callbacks atomically + pool.SetAllMetricCallbacks(nil) + return + } + globalRecorder = r + + // Register operation duration callbacks + // These capture r directly since we want them to use the specific recorder + // that was set at this point in time + operationDurationCallback = func(ctx context.Context, duration time.Duration, cmd Cmder, attempts int, err error, cn *pool.Conn, dbIndex int) { + getRecorder().RecordOperationDuration(ctx, duration, cmd, attempts, err, cn, dbIndex) + } + pipelineOperationDurationCallback = func(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn *pool.Conn, dbIndex int) { + getRecorder().RecordPipelineOperationDuration(ctx, duration, operationName, cmdCount, attempts, err, cn, dbIndex) + } + recorderMu.Unlock() + + // Register all pool metric callbacks atomically + // These use getRecorder() to safely access the current recorder + pool.SetAllMetricCallbacks(&pool.MetricCallbacks{ + ConnectionCreateTime: func(ctx context.Context, duration time.Duration, cn *pool.Conn) { + getRecorder().RecordConnectionCreateTime(ctx, duration, cn) + }, + ConnectionRelaxedTimeout: func(ctx context.Context, 
delta int, cn *pool.Conn, poolName, notificationType string) { + getRecorder().RecordConnectionRelaxedTimeout(ctx, delta, cn, poolName, notificationType) + }, + ConnectionHandoff: func(ctx context.Context, cn *pool.Conn, poolName string) { + getRecorder().RecordConnectionHandoff(ctx, cn, poolName) + }, + Error: func(ctx context.Context, errorType string, cn *pool.Conn, statusCode string, isInternal bool, retryAttempts int) { + getRecorder().RecordError(ctx, errorType, cn, statusCode, isInternal, retryAttempts) + }, + MaintenanceNotification: func(ctx context.Context, cn *pool.Conn, notificationType string) { + getRecorder().RecordMaintenanceNotification(ctx, cn, notificationType) + }, + ConnectionWaitTime: func(ctx context.Context, duration time.Duration, cn *pool.Conn) { + getRecorder().RecordConnectionWaitTime(ctx, duration, cn) + }, + ConnectionClosed: func(ctx context.Context, cn *pool.Conn, reason string, err error) { + getRecorder().RecordConnectionClosed(ctx, cn, reason, err) + }, + }) +} + +// RecordOperationDuration records the total operation duration. +// dbIndex is the Redis database index (0-15). +func RecordOperationDuration(ctx context.Context, duration time.Duration, cmd Cmder, attempts int, err error, cn *pool.Conn, dbIndex int) { + getRecorder().RecordOperationDuration(ctx, duration, cmd, attempts, err, cn, dbIndex) +} + +// RecordPipelineOperationDuration records the total pipeline/transaction duration. +// This is called from redis.go after pipeline/transaction execution completes. +// operationName should be "PIPELINE" for regular pipelines or "MULTI" for transactions. +// err is the error from the pipeline execution (can be nil). +// dbIndex is the Redis database index (0-15). 
+func RecordPipelineOperationDuration(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn *pool.Conn, dbIndex int) { + getRecorder().RecordPipelineOperationDuration(ctx, duration, operationName, cmdCount, attempts, err, cn, dbIndex) +} + +// RecordConnectionCreateTime records the time it took to create a new connection. +func RecordConnectionCreateTime(ctx context.Context, duration time.Duration, cn *pool.Conn) { + getRecorder().RecordConnectionCreateTime(ctx, duration, cn) +} + +// RecordPubSubMessage records a Pub/Sub message sent or received. +func RecordPubSubMessage(ctx context.Context, cn *pool.Conn, direction, channel string, sharded bool) { + getRecorder().RecordPubSubMessage(ctx, cn, direction, channel, sharded) +} + +// RecordStreamLag records the lag between message creation and consumption in a stream. +func RecordStreamLag(ctx context.Context, lag time.Duration, cn *pool.Conn, streamName, consumerGroup, consumerName string) { + getRecorder().RecordStreamLag(ctx, lag, cn, streamName, consumerGroup, consumerName) +} + +type noopRecorder struct{} + +func (noopRecorder) RecordOperationDuration(context.Context, time.Duration, Cmder, int, error, *pool.Conn, int) { +} +func (noopRecorder) RecordPipelineOperationDuration(context.Context, time.Duration, string, int, int, error, *pool.Conn, int) { +} +func (noopRecorder) RecordConnectionCreateTime(context.Context, time.Duration, *pool.Conn) {} +func (noopRecorder) RecordConnectionRelaxedTimeout(context.Context, int, *pool.Conn, string, string) { +} +func (noopRecorder) RecordConnectionHandoff(context.Context, *pool.Conn, string) {} +func (noopRecorder) RecordError(context.Context, string, *pool.Conn, string, bool, int) {} +func (noopRecorder) RecordMaintenanceNotification(context.Context, *pool.Conn, string) {} + +func (noopRecorder) RecordConnectionWaitTime(context.Context, time.Duration, *pool.Conn) {} +func (noopRecorder) 
RecordConnectionClosed(context.Context, *pool.Conn, string, error) {} + +func (noopRecorder) RecordPubSubMessage(context.Context, *pool.Conn, string, string, bool) {} + +func (noopRecorder) RecordStreamLag(context.Context, time.Duration, *pool.Conn, string, string, string) { +} + +// RegisterPools registers connection pools with the global recorder. +func RegisterPools(connPool pool.Pooler, pubSubPool PubSubPooler, addr string) { + // Check if the global recorder implements PoolRegistrar + if registrar, ok := globalRecorder.(PoolRegistrar); ok { + // Generate a unique ID for this client's pools + uniqueID := generateUniqueID() + + if connPool != nil { + poolName := addr + "_" + uniqueID + registrar.RegisterPool(poolName, connPool) + } + if pubSubPool != nil { + poolName := addr + "_" + uniqueID + "_pubsub" + registrar.RegisterPubSubPool(poolName, pubSubPool) + } + } +} + +// UnregisterPools removes connection pools from the global recorder +func UnregisterPools(connPool pool.Pooler, pubSubPool PubSubPooler) { + // Check if the global recorder implements PoolRegistrar + if registrar, ok := globalRecorder.(PoolRegistrar); ok { + if connPool != nil { + registrar.UnregisterPool(connPool) + } + if pubSubPool != nil { + registrar.UnregisterPubSubPool(pubSubPool) + } + } +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go index 7f45bc0b..f0af63c6 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go @@ -1,64 +1,830 @@ +// Package pool implements the pool management package pool import ( "bufio" "context" + "errors" + "fmt" "net" + "sync" "sync/atomic" "time" + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" "github.com/redis/go-redis/v9/internal/proto" ) var noDeadline = time.Time{} +// Preallocated errors for hot paths to avoid allocations +var ( + 
errAlreadyMarkedForHandoff = errors.New("connection is already marked for handoff") + errNotMarkedForHandoff = errors.New("connection was not marked for handoff") + errHandoffStateChanged = errors.New("handoff state changed during marking") + errConnectionNotAvailable = errors.New("redis: connection not available") + errConnNotAvailableForWrite = errors.New("redis: connection not available for write operation") +) + +// getCachedTimeNs returns the current time in nanoseconds. +// This function previously used a global cache updated by a background goroutine, +// but that caused unnecessary CPU usage when the client was idle (ticker waking up +// the scheduler every 50ms). We now use time.Now() directly, which is fast enough +// on modern systems (vDSO on Linux) and only adds ~1-2% overhead in extreme +// high-concurrency benchmarks while eliminating idle CPU usage. +func getCachedTimeNs() int64 { + return time.Now().UnixNano() +} + +// GetCachedTimeNs returns the current time in nanoseconds. +// Exported for use by other packages that need fast time access. 
+func GetCachedTimeNs() int64 { + return getCachedTimeNs() +} + +// Global atomic counter for connection IDs +var connIDCounter uint64 + +// HandoffState represents the atomic state for connection handoffs +// This struct is stored atomically to prevent race conditions between +// checking handoff status and reading handoff parameters +type HandoffState struct { + ShouldHandoff bool // Whether connection should be handed off + Endpoint string // New endpoint for handoff + SeqID int64 // Sequence ID from MOVING notification +} + +// atomicNetConn is a wrapper to ensure consistent typing in atomic.Value +type atomicNetConn struct { + conn net.Conn +} + +// generateConnID generates a fast unique identifier for a connection with zero allocations +func generateConnID() uint64 { + return atomic.AddUint64(&connIDCounter, 1) +} + type Conn struct { - usedAt int64 // atomic - netConn net.Conn + // Connection identifier for unique tracking + id uint64 + + usedAt atomic.Int64 + lastPutAt atomic.Int64 + dialStartNs atomic.Int64 // Time when dial started (for connection create time metric) + + // Lock-free netConn access using atomic.Value + // Contains *atomicNetConn wrapper, accessed atomically for better performance + netConnAtomic atomic.Value // stores *atomicNetConn rd *proto.Reader bw *bufio.Writer wr *proto.Writer - Inited bool + // Lightweight mutex to protect reader operations during handoff + // Only used for the brief period during SetNetConn and HasBufferedData/PeekReplyTypeSafe + readerMu sync.RWMutex + + // State machine for connection state management + // Replaces: usable, Inited, used + // Provides thread-safe state transitions with FIFO waiting queue + // States: CREATED → INITIALIZING → IDLE ⇄ IN_USE + // ↓ + // UNUSABLE (handoff/reauth) + // ↓ + // IDLE/CLOSED + stateMachine *ConnStateMachine + + // Handoff metadata - managed separately from state machine + // These are atomic for lock-free access during handoff operations + handoffStateAtomic atomic.Value 
// stores *HandoffState + handoffRetriesAtomic atomic.Uint32 // retry counter + pooled bool + pubsub bool + closed atomic.Bool createdAt time.Time + expiresAt time.Time + poolName string // Name of the pool this connection belongs to (for metrics) + + // maintenanceNotifications upgrade support: relaxed timeouts during migrations/failovers + + // Using atomic operations for lock-free access to avoid mutex contention + relaxedReadTimeoutNs atomic.Int64 // time.Duration as nanoseconds + relaxedWriteTimeoutNs atomic.Int64 // time.Duration as nanoseconds + relaxedDeadlineNs atomic.Int64 // time.Time as nanoseconds since epoch + + // Counter to track multiple relaxed timeout setters if we have nested calls + // will be decremented when ClearRelaxedTimeout is called or deadline is reached + // if counter reaches 0, we clear the relaxed timeouts + relaxedCounter atomic.Int32 + + // Connection initialization function for reconnections + initConnFunc func(context.Context, *Conn) error + + onClose func() error } func NewConn(netConn net.Conn) *Conn { + return NewConnWithBufferSize(netConn, proto.DefaultBufferSize, proto.DefaultBufferSize) +} + +func NewConnWithBufferSize(netConn net.Conn, readBufSize, writeBufSize int) *Conn { + now := time.Now() cn := &Conn{ - netConn: netConn, - createdAt: time.Now(), + createdAt: now, + id: generateConnID(), // Generate unique ID for this connection + stateMachine: NewConnStateMachine(), } - cn.rd = proto.NewReader(netConn) - cn.bw = bufio.NewWriter(netConn) + + // Use specified buffer sizes, or fall back to 32KiB defaults if 0 + if readBufSize > 0 { + cn.rd = proto.NewReaderSize(netConn, readBufSize) + } else { + cn.rd = proto.NewReader(netConn) // Uses 32KiB default + } + + if writeBufSize > 0 { + cn.bw = bufio.NewWriterSize(netConn, writeBufSize) + } else { + cn.bw = bufio.NewWriterSize(netConn, proto.DefaultBufferSize) + } + + // Store netConn atomically for lock-free access using wrapper + cn.netConnAtomic.Store(&atomicNetConn{conn: 
netConn}) + cn.wr = proto.NewWriter(cn.bw) - cn.SetUsedAt(time.Now()) + cn.SetUsedAt(now) + // Initialize handoff state atomically + initialHandoffState := &HandoffState{ + ShouldHandoff: false, + Endpoint: "", + SeqID: 0, + } + cn.handoffStateAtomic.Store(initialHandoffState) return cn } func (cn *Conn) UsedAt() time.Time { - unix := atomic.LoadInt64(&cn.usedAt) - return time.Unix(unix, 0) + return time.Unix(0, cn.usedAt.Load()) +} +func (cn *Conn) SetUsedAt(tm time.Time) { + cn.usedAt.Store(tm.UnixNano()) } -func (cn *Conn) SetUsedAt(tm time.Time) { - atomic.StoreInt64(&cn.usedAt, tm.Unix()) +func (cn *Conn) UsedAtNs() int64 { + return cn.usedAt.Load() +} +func (cn *Conn) SetUsedAtNs(ns int64) { + cn.usedAt.Store(ns) +} + +func (cn *Conn) LastPutAtNs() int64 { + return cn.lastPutAt.Load() +} +func (cn *Conn) SetLastPutAtNs(ns int64) { + cn.lastPutAt.Store(ns) +} + +// GetDialStartNs returns the time when the dial started (in nanoseconds since epoch). +// This is used to calculate the full connection creation time (TCP + handshake). +func (cn *Conn) GetDialStartNs() int64 { + return cn.dialStartNs.Load() +} + +// PoolName returns the name of the pool this connection belongs to. +// This is used for metrics to identify which pool a connection is from. +func (cn *Conn) PoolName() string { + return cn.poolName +} + +// SetPoolName sets the name of the pool this connection belongs to. +// This should be called when the connection is added to a pool. +func (cn *Conn) SetPoolName(name string) { + cn.poolName = name +} + +// Backward-compatible wrapper methods for state machine +// These maintain the existing API while using the new state machine internally + +// CompareAndSwapUsable atomically compares and swaps the usable flag (lock-free). +// +// This is used by background operations (handoff, re-auth) to acquire exclusive +// access to a connection. The operation sets usable to false, preventing the pool +// from returning the connection to clients. 
+// +// Returns true if the swap was successful (old value matched), false otherwise. +// +// Implementation note: This is a compatibility wrapper around the state machine. +// It checks if the current state is "usable" (IDLE or IN_USE) and transitions accordingly. +// Deprecated: Use GetStateMachine().TryTransition() directly for better state management. +func (cn *Conn) CompareAndSwapUsable(old, new bool) bool { + currentState := cn.stateMachine.GetState() + + // Check if current state matches the "old" usable value + currentUsable := (currentState == StateIdle || currentState == StateInUse) + if currentUsable != old { + return false + } + + // If we're trying to set to the same value, succeed immediately + if old == new { + return true + } + + // Transition based on new value + if new { + // Trying to make usable - transition from UNUSABLE to IDLE + // This should only work from UNUSABLE or INITIALIZING states + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition( + validFromInitializingOrUnusable, + StateIdle, + ) + return err == nil + } + // Trying to make unusable - transition from IDLE to UNUSABLE + // This is typically for acquiring the connection for background operations + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition( + validFromIdle, + StateUnusable, + ) + return err == nil +} + +// IsUsable returns true if the connection is safe to use for new commands (lock-free). +// +// A connection is "usable" when it's in a stable state and can be returned to clients. +// It becomes unusable during: +// - Handoff operations (network connection replacement) +// - Re-authentication (credential updates) +// - Other background operations that need exclusive access +// +// Note: CREATED state is considered usable because new connections need to pass OnGet() hook +// before initialization. The initialization happens after OnGet() in the client code. 
+func (cn *Conn) IsUsable() bool { + state := cn.stateMachine.GetState() + // CREATED, IDLE, and IN_USE states are considered usable + // CREATED: new connection, not yet initialized (will be initialized by client) + // IDLE: initialized and ready to be acquired + // IN_USE: usable but currently acquired by someone + return state == StateCreated || state == StateIdle || state == StateInUse +} + +// SetUsable sets the usable flag for the connection (lock-free). +// +// Deprecated: Use GetStateMachine().Transition() directly for better state management. +// This method is kept for backwards compatibility. +// +// This should be called to mark a connection as usable after initialization or +// to release it after a background operation completes. +// +// Prefer CompareAndSwapUsable() when acquiring exclusive access to avoid race conditions. +// Deprecated: Use GetStateMachine().Transition() directly for better state management. +func (cn *Conn) SetUsable(usable bool) { + if usable { + // Transition to IDLE state (ready to be acquired) + cn.stateMachine.Transition(StateIdle) + } else { + // Transition to UNUSABLE state (for background operations) + cn.stateMachine.Transition(StateUnusable) + } +} + +// IsInited returns true if the connection has been initialized. +// This is a backward-compatible wrapper around the state machine. +func (cn *Conn) IsInited() bool { + state := cn.stateMachine.GetState() + // Connection is initialized if it's in IDLE or any post-initialization state + return state != StateCreated && state != StateInitializing && state != StateClosed +} + +// Used - State machine based implementation + +// CompareAndSwapUsed atomically compares and swaps the used flag (lock-free). +// This method is kept for backwards compatibility. +// +// This is the preferred method for acquiring a connection from the pool, as it +// ensures that only one goroutine marks the connection as used. 
+// +// Implementation: Uses state machine transitions IDLE ⇄ IN_USE +// +// Returns true if the swap was successful (old value matched), false otherwise. +// Deprecated: Use GetStateMachine().TryTransition() directly for better state management. +func (cn *Conn) CompareAndSwapUsed(old, new bool) bool { + if old == new { + // No change needed + currentState := cn.stateMachine.GetState() + currentUsed := (currentState == StateInUse) + return currentUsed == old + } + + if !old && new { + // Acquiring: IDLE → IN_USE + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition(validFromCreatedOrIdle, StateInUse) + return err == nil + } else { + // Releasing: IN_USE → IDLE + // Use predefined slice to avoid allocation + _, err := cn.stateMachine.TryTransition(validFromInUse, StateIdle) + return err == nil + } +} + +// IsUsed returns true if the connection is currently in use (lock-free). +// +// Deprecated: Use GetStateMachine().GetState() == StateInUse directly for better clarity. +// This method is kept for backwards compatibility. +// +// A connection is "used" when it has been retrieved from the pool and is +// actively processing a command. Background operations (like re-auth) should +// wait until the connection is not used before executing commands. +func (cn *Conn) IsUsed() bool { + return cn.stateMachine.GetState() == StateInUse +} + +// SetUsed sets the used flag for the connection (lock-free). +// +// This should be called when returning a connection to the pool (set to false) +// or when a single-connection pool retrieves its connection (set to true). +// +// Prefer CompareAndSwapUsed() when acquiring from a multi-connection pool to +// avoid race conditions. +// Deprecated: Use GetStateMachine().Transition() directly for better state management. 
+func (cn *Conn) SetUsed(val bool) { + if val { + cn.stateMachine.Transition(StateInUse) + } else { + cn.stateMachine.Transition(StateIdle) + } +} + +// getNetConn returns the current network connection using atomic load (lock-free). +// This is the fast path for accessing netConn without mutex overhead. +func (cn *Conn) getNetConn() net.Conn { + if v := cn.netConnAtomic.Load(); v != nil { + if wrapper, ok := v.(*atomicNetConn); ok { + return wrapper.conn + } + } + return nil +} + +// setNetConn stores the network connection atomically (lock-free). +// This is used for the fast path of connection replacement. +func (cn *Conn) setNetConn(netConn net.Conn) { + cn.netConnAtomic.Store(&atomicNetConn{conn: netConn}) +} + +// Handoff state management - atomic access to handoff metadata + +// ShouldHandoff returns true if connection needs handoff (lock-free). +func (cn *Conn) ShouldHandoff() bool { + if v := cn.handoffStateAtomic.Load(); v != nil { + return v.(*HandoffState).ShouldHandoff + } + return false +} + +// GetHandoffEndpoint returns the new endpoint for handoff (lock-free). +func (cn *Conn) GetHandoffEndpoint() string { + if v := cn.handoffStateAtomic.Load(); v != nil { + return v.(*HandoffState).Endpoint + } + return "" +} + +// GetMovingSeqID returns the sequence ID from the MOVING notification (lock-free). +func (cn *Conn) GetMovingSeqID() int64 { + if v := cn.handoffStateAtomic.Load(); v != nil { + return v.(*HandoffState).SeqID + } + return 0 +} + +// GetHandoffInfo returns all handoff information atomically (lock-free). +// This method prevents race conditions by returning all handoff state in a single atomic operation. +// Returns (shouldHandoff, endpoint, seqID). 
+func (cn *Conn) GetHandoffInfo() (bool, string, int64) { + if v := cn.handoffStateAtomic.Load(); v != nil { + state := v.(*HandoffState) + return state.ShouldHandoff, state.Endpoint, state.SeqID + } + return false, "", 0 +} + +// HandoffRetries returns the current handoff retry count (lock-free). +func (cn *Conn) HandoffRetries() int { + return int(cn.handoffRetriesAtomic.Load()) +} + +// IncrementAndGetHandoffRetries atomically increments and returns handoff retries (lock-free). +func (cn *Conn) IncrementAndGetHandoffRetries(n int) int { + return int(cn.handoffRetriesAtomic.Add(uint32(n))) +} + +// IsPooled returns true if the connection is managed by a pool and will be pooled on Put. +func (cn *Conn) IsPooled() bool { + return cn.pooled +} + +// IsPubSub returns true if the connection is used for PubSub. +func (cn *Conn) IsPubSub() bool { + return cn.pubsub +} + +// SetRelaxedTimeout sets relaxed timeouts for this connection during maintenanceNotifications upgrades. +// These timeouts will be used for all subsequent commands until the deadline expires. +// Uses atomic operations for lock-free access. +// Note: Metrics should be recorded by the caller (notification handler) which has context about +// the notification type and pool name. +func (cn *Conn) SetRelaxedTimeout(readTimeout, writeTimeout time.Duration) { + cn.relaxedCounter.Add(1) + cn.relaxedReadTimeoutNs.Store(int64(readTimeout)) + cn.relaxedWriteTimeoutNs.Store(int64(writeTimeout)) +} + +// SetRelaxedTimeoutWithDeadline sets relaxed timeouts with an expiration deadline. +// After the deadline, timeouts automatically revert to normal values. +// Uses atomic operations for lock-free access. +func (cn *Conn) SetRelaxedTimeoutWithDeadline(readTimeout, writeTimeout time.Duration, deadline time.Time) { + cn.SetRelaxedTimeout(readTimeout, writeTimeout) + cn.relaxedDeadlineNs.Store(deadline.UnixNano()) +} + +// ClearRelaxedTimeout removes relaxed timeouts, returning to normal timeout behavior. 
+// Uses atomic operations for lock-free access. +func (cn *Conn) ClearRelaxedTimeout() { + // Atomically decrement counter and check if we should clear + newCount := cn.relaxedCounter.Add(-1) + deadlineNs := cn.relaxedDeadlineNs.Load() + if newCount <= 0 && (deadlineNs == 0 || time.Now().UnixNano() >= deadlineNs) { + // Use atomic load to get current value for CAS to avoid stale value race + current := cn.relaxedCounter.Load() + if current <= 0 && cn.relaxedCounter.CompareAndSwap(current, 0) { + cn.clearRelaxedTimeout() + } + } +} + +func (cn *Conn) clearRelaxedTimeout() { + cn.relaxedReadTimeoutNs.Store(0) + cn.relaxedWriteTimeoutNs.Store(0) + cn.relaxedDeadlineNs.Store(0) + cn.relaxedCounter.Store(0) + + // Note: Metrics for timeout unrelaxing are not recorded here because we don't have + // context about which notification type or pool triggered the relaxation. + // In practice, relaxed timeouts expire automatically via deadline, so explicit + // unrelaxing metrics are less critical than the initial relaxation metrics. +} + +// HasRelaxedTimeout returns true if relaxed timeouts are currently active on this connection. +// This checks both the timeout values and the deadline (if set). +// Uses atomic operations for lock-free access. +func (cn *Conn) HasRelaxedTimeout() bool { + // Fast path: no relaxed timeouts are set + if cn.relaxedCounter.Load() <= 0 { + return false + } + + readTimeoutNs := cn.relaxedReadTimeoutNs.Load() + writeTimeoutNs := cn.relaxedWriteTimeoutNs.Load() + + // If no relaxed timeouts are set, return false + if readTimeoutNs <= 0 && writeTimeoutNs <= 0 { + return false + } + + deadlineNs := cn.relaxedDeadlineNs.Load() + // If no deadline is set, relaxed timeouts are active + if deadlineNs == 0 { + return true + } + + // If deadline is set, check if it's still in the future + return time.Now().UnixNano() < deadlineNs +} + +// getEffectiveReadTimeout returns the timeout to use for read operations. 
+// If relaxed timeout is set and not expired, it takes precedence over the provided timeout. +// This method automatically clears expired relaxed timeouts using atomic operations. +func (cn *Conn) getEffectiveReadTimeout(normalTimeout time.Duration) time.Duration { + readTimeoutNs := cn.relaxedReadTimeoutNs.Load() + + // Fast path: no relaxed timeout set + if readTimeoutNs <= 0 { + return normalTimeout + } + + deadlineNs := cn.relaxedDeadlineNs.Load() + // If no deadline is set, use relaxed timeout + if deadlineNs == 0 { + return time.Duration(readTimeoutNs) + } + + // Use cached time to avoid expensive syscall (max 50ms staleness is acceptable for timeout checks) + nowNs := getCachedTimeNs() + // Check if deadline has passed + if nowNs < deadlineNs { + // Deadline is in the future, use relaxed timeout + return time.Duration(readTimeoutNs) + } else { + // Deadline has passed, clear relaxed timeouts atomically and use normal timeout + newCount := cn.relaxedCounter.Add(-1) + if newCount <= 0 { + internal.Logger.Printf(context.Background(), logs.UnrelaxedTimeoutAfterDeadline(cn.GetID())) + cn.clearRelaxedTimeout() + } + return normalTimeout + } +} + +// getEffectiveWriteTimeout returns the timeout to use for write operations. +// If relaxed timeout is set and not expired, it takes precedence over the provided timeout. +// This method automatically clears expired relaxed timeouts using atomic operations. 
+func (cn *Conn) getEffectiveWriteTimeout(normalTimeout time.Duration) time.Duration { + writeTimeoutNs := cn.relaxedWriteTimeoutNs.Load() + + // Fast path: no relaxed timeout set + if writeTimeoutNs <= 0 { + return normalTimeout + } + + deadlineNs := cn.relaxedDeadlineNs.Load() + // If no deadline is set, use relaxed timeout + if deadlineNs == 0 { + return time.Duration(writeTimeoutNs) + } + + // Use cached time to avoid expensive syscall (max 50ms staleness is acceptable for timeout checks) + nowNs := getCachedTimeNs() + // Check if deadline has passed + if nowNs < deadlineNs { + // Deadline is in the future, use relaxed timeout + return time.Duration(writeTimeoutNs) + } else { + // Deadline has passed, clear relaxed timeouts atomically and use normal timeout + newCount := cn.relaxedCounter.Add(-1) + if newCount <= 0 { + internal.Logger.Printf(context.Background(), logs.UnrelaxedTimeoutAfterDeadline(cn.GetID())) + cn.clearRelaxedTimeout() + } + return normalTimeout + } +} + +func (cn *Conn) SetOnClose(fn func() error) { + cn.onClose = fn +} + +// SetInitConnFunc sets the connection initialization function to be called on reconnections. +func (cn *Conn) SetInitConnFunc(fn func(context.Context, *Conn) error) { + cn.initConnFunc = fn +} + +// ExecuteInitConn runs the stored connection initialization function if available. 
+func (cn *Conn) ExecuteInitConn(ctx context.Context) error { + if cn.initConnFunc != nil { + return cn.initConnFunc(ctx, cn) + } + return fmt.Errorf("redis: no initConnFunc set for conn[%d]", cn.GetID()) } func (cn *Conn) SetNetConn(netConn net.Conn) { - cn.netConn = netConn + // Store the new connection atomically first (lock-free) + cn.setNetConn(netConn) + // Protect reader reset operations to avoid data races + // Use write lock since we're modifying the reader state + cn.readerMu.Lock() cn.rd.Reset(netConn) + cn.readerMu.Unlock() + cn.bw.Reset(netConn) } +// GetNetConn safely returns the current network connection using atomic load (lock-free). +// This method is used by the pool for health checks and provides better performance. +func (cn *Conn) GetNetConn() net.Conn { + return cn.getNetConn() +} + +// SetNetConnAndInitConn replaces the underlying connection and executes the initialization. +// This method ensures only one initialization can happen at a time by using atomic state transitions. +// If another goroutine is currently initializing, this will wait for it to complete. +func (cn *Conn) SetNetConnAndInitConn(ctx context.Context, netConn net.Conn) error { + // Wait for and transition to INITIALIZING state - this prevents concurrent initializations + // Valid from states: CREATED (first init), IDLE (reconnect), UNUSABLE (handoff/reauth) + // If another goroutine is initializing, we'll wait for it to finish + // if the context has a deadline, use that, otherwise use the connection read (relaxed) timeout + // which should be set during handoff. 
If it is not set, use a 5 second default + deadline, ok := ctx.Deadline() + if !ok { + deadline = time.Now().Add(cn.getEffectiveReadTimeout(5 * time.Second)) + } + waitCtx, cancel := context.WithDeadline(ctx, deadline) + defer cancel() + // Use predefined slice to avoid allocation + finalState, err := cn.stateMachine.AwaitAndTransition( + waitCtx, + validFromCreatedIdleOrUnusable, + StateInitializing, + ) + if err != nil { + return fmt.Errorf("cannot initialize connection from state %s: %w", finalState, err) + } + + // Replace the underlying connection + cn.SetNetConn(netConn) + + // Execute initialization + // NOTE: ExecuteInitConn (via baseClient.initConn) will transition to IDLE on success + // or CLOSED on failure. We don't need to do it here. + // NOTE: Initconn returns conn in IDLE state + initErr := cn.ExecuteInitConn(ctx) + if initErr != nil { + // ExecuteInitConn already transitioned to CLOSED, just return the error + return initErr + } + + // ExecuteInitConn already transitioned to IDLE + return nil +} + +// MarkForHandoff marks the connection for handoff due to MOVING notification. +// Returns an error if the connection is already marked for handoff. +// Note: This only sets metadata - the connection state is not changed until OnPut. +// This allows the current user to finish using the connection before handoff. +func (cn *Conn) MarkForHandoff(newEndpoint string, seqID int64) error { + // Check if already marked for handoff + if cn.ShouldHandoff() { + return errAlreadyMarkedForHandoff + } + + // Set handoff metadata atomically + cn.handoffStateAtomic.Store(&HandoffState{ + ShouldHandoff: true, + Endpoint: newEndpoint, + SeqID: seqID, + }) + return nil +} + +// MarkQueuedForHandoff marks the connection as queued for handoff processing. +// This makes the connection unusable until handoff completes. +// This is called from OnPut hook, where the connection is typically in IN_USE state. 
+// The pool will preserve the UNUSABLE state and not overwrite it with IDLE. +func (cn *Conn) MarkQueuedForHandoff() error { + // Get current handoff state + currentState := cn.handoffStateAtomic.Load() + if currentState == nil { + return errNotMarkedForHandoff + } + + state := currentState.(*HandoffState) + if !state.ShouldHandoff { + return errNotMarkedForHandoff + } + + // Create new state with ShouldHandoff=false but preserve endpoint and seqID + // This prevents the connection from being queued multiple times while still + // allowing the worker to access the handoff metadata + newState := &HandoffState{ + ShouldHandoff: false, + Endpoint: state.Endpoint, // Preserve endpoint for handoff processing + SeqID: state.SeqID, // Preserve seqID for handoff processing + } + + // Atomic compare-and-swap to update state + if !cn.handoffStateAtomic.CompareAndSwap(currentState, newState) { + // State changed between load and CAS - retry or return error + return errHandoffStateChanged + } + + // Transition to UNUSABLE from IN_USE (normal flow), IDLE (edge cases), or CREATED (tests/uninitialized) + // The connection is typically in IN_USE state when OnPut is called (normal Put flow) + // But in some edge cases or tests, it might be in IDLE or CREATED state + // The pool will detect this state change and preserve it (not overwrite with IDLE) + // Use predefined slice to avoid allocation + finalState, err := cn.stateMachine.TryTransition(validFromCreatedInUseOrIdle, StateUnusable) + if err != nil { + // Check if already in UNUSABLE state (race condition or retry) + // ShouldHandoff should be false now, but check just in case + if finalState == StateUnusable && !cn.ShouldHandoff() { + // Already unusable - this is fine, keep the new handoff state + return nil + } + // Restore the original state if transition fails for other reasons + cn.handoffStateAtomic.Store(currentState) + return fmt.Errorf("failed to mark connection as unusable: %w", err) + } + return nil +} + +// GetID 
returns the unique identifier for this connection. +func (cn *Conn) GetID() uint64 { + return cn.id +} + +// GetStateMachine returns the connection's state machine for advanced state management. +// This is primarily used by internal packages like maintnotifications for handoff processing. +func (cn *Conn) GetStateMachine() *ConnStateMachine { + return cn.stateMachine +} + +// TryAcquire attempts to acquire the connection for use. +// This is an optimized inline method for the hot path (Get operation). +// +// It tries to transition from IDLE -> IN_USE or CREATED -> CREATED. +// Returns true if the connection was successfully acquired, false otherwise. +// The CREATED->CREATED is done so we can keep the state correct for later +// initialization of the connection in initConn. +// +// Performance: This is faster than calling GetStateMachine() + TryTransitionFast() +// +// NOTE: We directly access cn.stateMachine.state here instead of using the state machine's +// methods. This breaks encapsulation but is necessary for performance. +// The IDLE->IN_USE and CREATED->CREATED transitions don't need +// waiter notification, and benchmarks show 1-3% improvement. If the state machine ever +// needs to notify waiters on these transitions, update this to use TryTransitionFast(). +func (cn *Conn) TryAcquire() bool { + // The || operator short-circuits, so only 1 CAS in the common case + return cn.stateMachine.state.CompareAndSwap(uint32(StateIdle), uint32(StateInUse)) || + cn.stateMachine.state.CompareAndSwap(uint32(StateCreated), uint32(StateCreated)) +} + +// Release releases the connection back to the pool. +// This is an optimized inline method for the hot path (Put operation). +// +// It tries to transition from IN_USE -> IDLE. +// Returns true if the connection was successfully released, false otherwise. +// +// Performance: This is faster than calling GetStateMachine() + TryTransitionFast(). 
+// +// NOTE: We directly access cn.stateMachine.state here instead of using the state machine's +// methods. This breaks encapsulation but is necessary for performance. +// If the state machine ever needs to notify waiters +// on this transition, update this to use TryTransitionFast(). +func (cn *Conn) Release() bool { + // Inline the hot path - single CAS operation + return cn.stateMachine.state.CompareAndSwap(uint32(StateInUse), uint32(StateIdle)) +} + +// ClearHandoffState clears the handoff state after successful handoff. +// Makes the connection usable again. +func (cn *Conn) ClearHandoffState() { + // Clear handoff metadata + cn.handoffStateAtomic.Store(&HandoffState{ + ShouldHandoff: false, + Endpoint: "", + SeqID: 0, + }) + + // Reset retry counter + cn.handoffRetriesAtomic.Store(0) + + // Mark connection as usable again + // Use state machine directly instead of deprecated SetUsable + // probably done by initConn + cn.stateMachine.Transition(StateIdle) +} + +// HasBufferedData safely checks if the connection has buffered data. +// This method is used to avoid data races when checking for push notifications. +func (cn *Conn) HasBufferedData() bool { + // Use read lock for concurrent access to reader state + cn.readerMu.RLock() + defer cn.readerMu.RUnlock() + return cn.rd.Buffered() > 0 +} + +// PeekReplyTypeSafe safely peeks at the reply type. +// This method is used to avoid data races when checking for push notifications. 
+func (cn *Conn) PeekReplyTypeSafe() (byte, error) { + // Use read lock for concurrent access to reader state + cn.readerMu.RLock() + defer cn.readerMu.RUnlock() + + if cn.rd.Buffered() <= 0 { + return 0, fmt.Errorf("redis: can't peek reply type, no data available") + } + return cn.rd.PeekReplyType() +} + func (cn *Conn) Write(b []byte) (int, error) { - return cn.netConn.Write(b) + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return netConn.Write(b) + } + return 0, net.ErrClosed } func (cn *Conn) RemoteAddr() net.Addr { - if cn.netConn != nil { - return cn.netConn.RemoteAddr() + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return netConn.RemoteAddr() } return nil } @@ -67,7 +833,16 @@ func (cn *Conn) WithReader( ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error, ) error { if timeout >= 0 { - if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil { + // Use relaxed timeout if set, otherwise use provided timeout + effectiveTimeout := cn.getEffectiveReadTimeout(timeout) + + // Get the connection directly from atomic storage + netConn := cn.getNetConn() + if netConn == nil { + return errConnectionNotAvailable + } + + if err := netConn.SetReadDeadline(cn.deadline(ctx, effectiveTimeout)); err != nil { return err } } @@ -78,13 +853,25 @@ func (cn *Conn) WithWriter( ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error, ) error { if timeout >= 0 { - if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil { - return err + // Use relaxed timeout if set, otherwise use provided timeout + effectiveTimeout := cn.getEffectiveWriteTimeout(timeout) + + // Set write deadline on the connection + if netConn := cn.getNetConn(); netConn != nil { + if err := netConn.SetWriteDeadline(cn.deadline(ctx, effectiveTimeout)); err != nil { + return err + } + } else { + // Connection is not 
available - return preallocated error + return errConnNotAvailableForWrite } } + // Reset the buffered writer if needed, should not happen if cn.bw.Buffered() > 0 { - cn.bw.Reset(cn.netConn) + if netConn := cn.getNetConn(); netConn != nil { + cn.bw.Reset(netConn) + } } if err := fn(cn.wr); err != nil { @@ -94,13 +881,47 @@ func (cn *Conn) WithWriter( return cn.bw.Flush() } -func (cn *Conn) Close() error { - return cn.netConn.Close() +func (cn *Conn) IsClosed() bool { + return cn.closed.Load() || cn.stateMachine.GetState() == StateClosed } +func (cn *Conn) Close() error { + cn.closed.Store(true) + + // Transition to CLOSED state + cn.stateMachine.Transition(StateClosed) + + if cn.onClose != nil { + // ignore error + _ = cn.onClose() + } + + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return netConn.Close() + } + return nil +} + +// MaybeHasData tries to peek at the next byte in the socket without consuming it +// This is used to check if there are push notifications available +// Important: This will work on Linux, but not on Windows +func (cn *Conn) MaybeHasData() bool { + // Lock-free netConn access for better performance + if netConn := cn.getNetConn(); netConn != nil { + return maybeHasData(netConn) + } + return false +} + +// deadline computes the effective deadline time based on context and timeout. +// It updates the usedAt timestamp to now. +// Uses cached time to avoid expensive syscall (max 50ms staleness is acceptable for deadline calculation). 
func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time { - tm := time.Now() - cn.SetUsedAt(tm) + // Use cached time for deadline calculation (called 2x per command: read + write) + nowNs := getCachedTimeNs() + cn.SetUsedAtNs(nowNs) + tm := time.Unix(0, nowNs) if timeout > 0 { tm = tm.Add(timeout) diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go index 83190d39..9e83dd83 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go @@ -12,6 +12,9 @@ import ( var errUnexpectedRead = errors.New("unexpected read from socket") +// connCheck checks if the connection is still alive and if there is data in the socket +// it will try to peek at the next byte without consuming it since we may want to work with it +// later on (e.g. push notifications) func connCheck(conn net.Conn) error { // Reset previous timeout. 
_ = conn.SetDeadline(time.Time{}) @@ -29,7 +32,9 @@ func connCheck(conn net.Conn) error { if err := rawConn.Read(func(fd uintptr) bool { var buf [1]byte - n, err := syscall.Read(int(fd), buf[:]) + // Use MSG_PEEK to peek at data without consuming it + n, _, err := syscall.Recvfrom(int(fd), buf[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) + switch { case n == 0 && err == nil: sysErr = io.EOF @@ -47,3 +52,8 @@ func connCheck(conn net.Conn) error { return sysErr } + +// maybeHasData checks if there is data in the socket without consuming it +func maybeHasData(conn net.Conn) bool { + return connCheck(conn) == errUnexpectedRead +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go index 295da126..f971d94c 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go @@ -2,8 +2,19 @@ package pool -import "net" +import ( + "errors" + "net" +) -func connCheck(conn net.Conn) error { +// errUnexpectedRead is placeholder error variable for non-unix build constraints +var errUnexpectedRead = errors.New("unexpected read from socket") + +func connCheck(_ net.Conn) error { return nil } + +// since we can't check for data on the socket, we just assume there is some +func maybeHasData(_ net.Conn) bool { + return true +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go new file mode 100644 index 00000000..afdc631c --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_state.go @@ -0,0 +1,343 @@ +package pool + +import ( + "container/list" + "context" + "errors" + "fmt" + "sync" + "sync/atomic" +) + +// ConnState represents the connection state in the state machine. +// States are designed to be lightweight and fast to check. 
+// +// State Transitions: +// +// CREATED → INITIALIZING → IDLE ⇄ IN_USE +// ↓ +// UNUSABLE (handoff/reauth) +// ↓ +// IDLE/CLOSED +type ConnState uint32 + +const ( + // StateCreated - Connection just created, not yet initialized + StateCreated ConnState = iota + + // StateInitializing - Connection initialization in progress + StateInitializing + + // StateIdle - Connection initialized and idle in pool, ready to be acquired + StateIdle + + // StateInUse - Connection actively processing a command (retrieved from pool) + StateInUse + + // StateUnusable - Connection temporarily unusable due to background operation + // (handoff, reauth, etc.). Cannot be acquired from pool. + StateUnusable + + // StateClosed - Connection closed + StateClosed +) + +// Predefined state slices to avoid allocations in hot paths +var ( + validFromInUse = []ConnState{StateInUse} + validFromCreatedOrIdle = []ConnState{StateCreated, StateIdle} + validFromCreatedInUseOrIdle = []ConnState{StateCreated, StateInUse, StateIdle} + // For AwaitAndTransition calls + validFromCreatedIdleOrUnusable = []ConnState{StateCreated, StateIdle, StateUnusable} + validFromIdle = []ConnState{StateIdle} + // For CompareAndSwapUsable + validFromInitializingOrUnusable = []ConnState{StateInitializing, StateUnusable} +) + +// Accessor functions for predefined slices to avoid allocations in external packages +// These return the same slice instance, so they're zero-allocation + +// ValidFromIdle returns a predefined slice containing only StateIdle. +// Use this to avoid allocations when calling AwaitAndTransition or TryTransition. +func ValidFromIdle() []ConnState { + return validFromIdle +} + +// ValidFromCreatedIdleOrUnusable returns a predefined slice for initialization transitions. +// Use this to avoid allocations when calling AwaitAndTransition or TryTransition. 
+func ValidFromCreatedIdleOrUnusable() []ConnState { + return validFromCreatedIdleOrUnusable +} + +// String returns a human-readable string representation of the state. +func (s ConnState) String() string { + switch s { + case StateCreated: + return "CREATED" + case StateInitializing: + return "INITIALIZING" + case StateIdle: + return "IDLE" + case StateInUse: + return "IN_USE" + case StateUnusable: + return "UNUSABLE" + case StateClosed: + return "CLOSED" + default: + return fmt.Sprintf("UNKNOWN(%d)", s) + } +} + +var ( + // ErrInvalidStateTransition is returned when a state transition is not allowed + ErrInvalidStateTransition = errors.New("invalid state transition") + + // ErrStateMachineClosed is returned when operating on a closed state machine + ErrStateMachineClosed = errors.New("state machine is closed") + + // ErrTimeout is returned when a state transition times out + ErrTimeout = errors.New("state transition timeout") +) + +// waiter represents a goroutine waiting for a state transition. +// Designed for minimal allocations and fast processing. +type waiter struct { + validStates map[ConnState]struct{} // States we're waiting for + targetState ConnState // State to transition to + done chan error // Signaled when transition completes or times out +} + +// ConnStateMachine manages connection state transitions with FIFO waiting queue. +// Optimized for: +// - Lock-free reads (hot path) +// - Minimal allocations +// - Fast state transitions +// - FIFO fairness for waiters +// Note: Handoff metadata (endpoint, seqID, retries) is managed separately in the Conn struct. +type ConnStateMachine struct { + // Current state - atomic for lock-free reads + state atomic.Uint32 + + // FIFO queue for waiters - only locked during waiter add/remove/notify + mu sync.Mutex + waiters *list.List // List of *waiter + waiterCount atomic.Int32 // Fast lock-free check for waiters (avoids mutex in hot path) +} + +// NewConnStateMachine creates a new connection state machine. 
+// Initial state is StateCreated. +func NewConnStateMachine() *ConnStateMachine { + sm := &ConnStateMachine{ + waiters: list.New(), + } + sm.state.Store(uint32(StateCreated)) + return sm +} + +// GetState returns the current state (lock-free read). +// This is the hot path - optimized for zero allocations and minimal overhead. +// Note: Zero allocations applies to state reads; converting the returned state to a string +// (via String()) may allocate if the state is unknown. +func (sm *ConnStateMachine) GetState() ConnState { + return ConnState(sm.state.Load()) +} + +// TryTransitionFast is an optimized version for the hot path (Get/Put operations). +// It only handles simple state transitions without waiter notification. +// This is safe because: +// 1. Get/Put don't need to wait for state changes +// 2. Background operations (handoff/reauth) use UNUSABLE state, which this won't match +// 3. If a background operation is in progress (state is UNUSABLE), this fails fast +// +// Returns true if transition succeeded, false otherwise. +// Use this for performance-critical paths where you don't need error details. +// +// Performance: Single CAS operation - as fast as the old atomic bool! +// For multiple from states, use: sm.TryTransitionFast(State1, Target) || sm.TryTransitionFast(State2, Target) +// The || operator short-circuits, so only 1 CAS is executed in the common case. +func (sm *ConnStateMachine) TryTransitionFast(fromState, targetState ConnState) bool { + return sm.state.CompareAndSwap(uint32(fromState), uint32(targetState)) +} + +// TryTransition attempts an immediate state transition without waiting. +// Returns the current state after the transition attempt and an error if the transition failed. +// The returned state is the CURRENT state (after the attempt), not the previous state. +// This is faster than AwaitAndTransition when you don't need to wait. +// Uses compare-and-swap to atomically transition, preventing concurrent transitions. 
+// This method does NOT wait - it fails immediately if the transition cannot be performed. +// +// Performance: Zero allocations on success path (hot path). +func (sm *ConnStateMachine) TryTransition(validFromStates []ConnState, targetState ConnState) (ConnState, error) { + // Try each valid from state with CAS + // This ensures only ONE goroutine can successfully transition at a time + for _, fromState := range validFromStates { + // Try to atomically swap from fromState to targetState + // If successful, we won the race and can proceed + if sm.state.CompareAndSwap(uint32(fromState), uint32(targetState)) { + // Success! We transitioned atomically + // Hot path optimization: only check for waiters if transition succeeded + // This avoids atomic load on every Get/Put when no waiters exist + if sm.waiterCount.Load() > 0 { + sm.notifyWaiters() + } + return targetState, nil + } + } + + // All CAS attempts failed - state is not valid for this transition + // Return the current state so caller can decide what to do + // Note: This error path allocates, but it's the exceptional case + currentState := sm.GetState() + return currentState, fmt.Errorf("%w: cannot transition from %s to %s (valid from: %v)", + ErrInvalidStateTransition, currentState, targetState, validFromStates) +} + +// Transition unconditionally transitions to the target state. +// Use with caution - prefer AwaitAndTransition or TryTransition for safety. +// This is useful for error paths or when you know the transition is valid. +func (sm *ConnStateMachine) Transition(targetState ConnState) { + sm.state.Store(uint32(targetState)) + sm.notifyWaiters() +} + +// AwaitAndTransition waits for the connection to reach one of the valid states, +// then atomically transitions to the target state. +// Returns the current state after the transition attempt and an error if the operation failed. +// The returned state is the CURRENT state (after the attempt), not the previous state. 
+// Returns error if timeout expires or context is cancelled. +// +// This method implements FIFO fairness - the first caller to wait gets priority +// when the state becomes available. +// +// Performance notes: +// - If already in a valid state, this is very fast (no allocation, no waiting) +// - If waiting is required, allocates one waiter struct and one channel +func (sm *ConnStateMachine) AwaitAndTransition( + ctx context.Context, + validFromStates []ConnState, + targetState ConnState, +) (ConnState, error) { + // Fast path: try immediate transition with CAS to prevent race conditions + // BUT: only if there are no waiters in the queue (to maintain FIFO ordering) + if sm.waiterCount.Load() == 0 { + for _, fromState := range validFromStates { + // Check if we're already in target state + if fromState == targetState && sm.GetState() == targetState { + return targetState, nil + } + + // Try to atomically swap from fromState to targetState + if sm.state.CompareAndSwap(uint32(fromState), uint32(targetState)) { + // Success! 
We transitioned atomically + sm.notifyWaiters() + return targetState, nil + } + } + } + + // Fast path failed - check if we should wait or fail + currentState := sm.GetState() + + // Check if closed + if currentState == StateClosed { + return currentState, ErrStateMachineClosed + } + + // Slow path: need to wait for state change + // Create waiter with valid states map for fast lookup + validStatesMap := make(map[ConnState]struct{}, len(validFromStates)) + for _, s := range validFromStates { + validStatesMap[s] = struct{}{} + } + + w := &waiter{ + validStates: validStatesMap, + targetState: targetState, + done: make(chan error, 1), // Buffered to avoid goroutine leak + } + + // Add to FIFO queue + sm.mu.Lock() + elem := sm.waiters.PushBack(w) + sm.waiterCount.Add(1) + sm.mu.Unlock() + + // Wait for state change or timeout + select { + case <-ctx.Done(): + // Timeout or cancellation - remove from queue + sm.mu.Lock() + sm.waiters.Remove(elem) + sm.waiterCount.Add(-1) + sm.mu.Unlock() + return sm.GetState(), ctx.Err() + case err := <-w.done: + // Transition completed (or failed) + // Note: waiterCount is decremented either in notifyWaiters (when the waiter is notified and removed) + // or here (on timeout/cancellation). + return sm.GetState(), err + } +} + +// notifyWaiters checks if any waiters can proceed and notifies them in FIFO order. +// This is called after every state transition. 
+func (sm *ConnStateMachine) notifyWaiters() { + // Fast path: check atomic counter without acquiring lock + // This eliminates mutex overhead in the common case (no waiters) + if sm.waiterCount.Load() == 0 { + return + } + + sm.mu.Lock() + defer sm.mu.Unlock() + + // Double-check after acquiring lock (waiters might have been processed) + if sm.waiters.Len() == 0 { + return + } + + // Process waiters in FIFO order until no more can be processed + // We loop instead of recursing to avoid stack overflow and mutex issues + for { + processed := false + + // Find the first waiter that can proceed + for elem := sm.waiters.Front(); elem != nil; elem = elem.Next() { + w := elem.Value.(*waiter) + + // Read current state inside the loop to get the latest value + currentState := sm.GetState() + + // Check if current state is valid for this waiter + if _, valid := w.validStates[currentState]; valid { + // Remove from queue first + sm.waiters.Remove(elem) + sm.waiterCount.Add(-1) + + // Use CAS to ensure state hasn't changed since we checked + // This prevents race condition where another thread changes state + // between our check and our transition + if sm.state.CompareAndSwap(uint32(currentState), uint32(w.targetState)) { + // Successfully transitioned - notify waiter + w.done <- nil + processed = true + break + } else { + // State changed - re-add waiter to front of queue to maintain FIFO ordering + // This waiter was first in line and should retain priority + sm.waiters.PushFront(w) + sm.waiterCount.Add(1) + // Continue to next iteration to re-read state + processed = true + break + } + } + } + + // If we didn't process any waiter, we're done + if !processed { + break + } + } +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go b/vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go new file mode 100644 index 00000000..a26e1976 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/hooks.go @@ -0,0 +1,165 @@ +package pool + +import 
( + "context" + "sync" +) + +// PoolHook defines the interface for connection lifecycle hooks. +type PoolHook interface { + // OnGet is called when a connection is retrieved from the pool. + // It can modify the connection or return an error to prevent its use. + // The accept flag can be used to prevent the connection from being used. + // On Accept = false the connection is rejected and returned to the pool. + // The error can be used to prevent the connection from being used and returned to the pool. + // On Errors, the connection is removed from the pool. + // It has isNewConn flag to indicate if this is a new connection (rather than idle from the pool) + // The flag can be used for gathering metrics on pool hit/miss ratio. + OnGet(ctx context.Context, conn *Conn, isNewConn bool) (accept bool, err error) + + // OnPut is called when a connection is returned to the pool. + // It returns whether the connection should be pooled and whether it should be removed. + OnPut(ctx context.Context, conn *Conn) (shouldPool bool, shouldRemove bool, err error) + + // OnRemove is called when a connection is removed from the pool. + // This happens when: + // - Connection fails health check + // - Connection exceeds max lifetime + // - Pool is being closed + // - Connection encounters an error + // Implementations should clean up any per-connection state. + // The reason parameter indicates why the connection was removed. + OnRemove(ctx context.Context, conn *Conn, reason error) +} + +// PoolHookManager manages multiple pool hooks. +type PoolHookManager struct { + hooks []PoolHook + hooksMu sync.RWMutex +} + +// NewPoolHookManager creates a new pool hook manager. +func NewPoolHookManager() *PoolHookManager { + return &PoolHookManager{ + hooks: make([]PoolHook, 0), + } +} + +// AddHook adds a pool hook to the manager. +// Hooks are called in the order they were added. 
+func (phm *PoolHookManager) AddHook(hook PoolHook) { + phm.hooksMu.Lock() + defer phm.hooksMu.Unlock() + phm.hooks = append(phm.hooks, hook) +} + +// RemoveHook removes a pool hook from the manager. +func (phm *PoolHookManager) RemoveHook(hook PoolHook) { + phm.hooksMu.Lock() + defer phm.hooksMu.Unlock() + + for i, h := range phm.hooks { + if h == hook { + // Remove hook by swapping with last element and truncating + phm.hooks[i] = phm.hooks[len(phm.hooks)-1] + phm.hooks = phm.hooks[:len(phm.hooks)-1] + break + } + } +} + +// ProcessOnGet calls all OnGet hooks in order. +// If any hook returns an error, processing stops and the error is returned. +func (phm *PoolHookManager) ProcessOnGet(ctx context.Context, conn *Conn, isNewConn bool) (acceptConn bool, err error) { + // Copy slice reference while holding lock (fast) + phm.hooksMu.RLock() + hooks := phm.hooks + phm.hooksMu.RUnlock() + + // Call hooks without holding lock (slow operations) + for _, hook := range hooks { + acceptConn, err := hook.OnGet(ctx, conn, isNewConn) + if err != nil { + return false, err + } + + if !acceptConn { + return false, nil + } + } + return true, nil +} + +// ProcessOnPut calls all OnPut hooks in order. +// The first hook that returns shouldRemove=true or shouldPool=false will stop processing. 
+func (phm *PoolHookManager) ProcessOnPut(ctx context.Context, conn *Conn) (shouldPool bool, shouldRemove bool, err error) { + // Copy slice reference while holding lock (fast) + phm.hooksMu.RLock() + hooks := phm.hooks + phm.hooksMu.RUnlock() + + shouldPool = true // Default to pooling the connection + + // Call hooks without holding lock (slow operations) + for _, hook := range hooks { + hookShouldPool, hookShouldRemove, hookErr := hook.OnPut(ctx, conn) + + if hookErr != nil { + return false, true, hookErr + } + + // If any hook says to remove or not pool, respect that decision + if hookShouldRemove { + return false, true, nil + } + + if !hookShouldPool { + shouldPool = false + } + } + + return shouldPool, false, nil +} + +// ProcessOnRemove calls all OnRemove hooks in order. +func (phm *PoolHookManager) ProcessOnRemove(ctx context.Context, conn *Conn, reason error) { + // Copy slice reference while holding lock (fast) + phm.hooksMu.RLock() + hooks := phm.hooks + phm.hooksMu.RUnlock() + + // Call hooks without holding lock (slow operations) + for _, hook := range hooks { + hook.OnRemove(ctx, conn, reason) + } +} + +// GetHookCount returns the number of registered hooks (for testing). +func (phm *PoolHookManager) GetHookCount() int { + phm.hooksMu.RLock() + defer phm.hooksMu.RUnlock() + return len(phm.hooks) +} + +// GetHooks returns a copy of all registered hooks. +func (phm *PoolHookManager) GetHooks() []PoolHook { + phm.hooksMu.RLock() + defer phm.hooksMu.RUnlock() + + hooks := make([]PoolHook, len(phm.hooks)) + copy(hooks, phm.hooks) + return hooks +} + +// Clone creates a copy of the hook manager with the same hooks. +// This is used for lock-free atomic updates of the hook manager. 
+func (phm *PoolHookManager) Clone() *PoolHookManager { + phm.hooksMu.RLock() + defer phm.hooksMu.RUnlock() + + newManager := &PoolHookManager{ + hooks: make([]PoolHook, len(phm.hooks)), + } + copy(newManager.hooks, phm.hooks) + return newManager +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go index 986c05d0..aaca530c 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go @@ -9,6 +9,8 @@ import ( "time" "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/rand" ) var ( @@ -21,25 +23,221 @@ var ( // ErrPoolTimeout timed out waiting to get a connection from the connection pool. ErrPoolTimeout = errors.New("redis: connection pool timeout") + + // ErrConnUnusableTimeout is returned when a connection is not usable and we timed out trying to mark it as unusable. + ErrConnUnusableTimeout = errors.New("redis: timed out trying to mark connection as unusable") + + // errHookRequestedRemoval is returned when a hook requests connection removal. + errHookRequestedRemoval = errors.New("hook requested removal") + + // errConnNotPooled is returned when trying to return a non-pooled connection to the pool. + errConnNotPooled = errors.New("connection not pooled") + // metricCallbackMu protects all global metric callback functions for thread-safe access. 
+ metricCallbackMu sync.RWMutex + + // Global metric callbacks for connection state changes + metricConnectionStateChangeCallback func(ctx context.Context, cn *Conn, fromState, toState string) + + // Global metric callback for connection creation time + metricConnectionCreateTimeCallback func(ctx context.Context, duration time.Duration, cn *Conn) + + // Global metric callback for connection relaxed timeout changes + // Parameters: ctx, delta (+1/-1), cn, poolName, notificationType + metricConnectionRelaxedTimeoutCallback func(ctx context.Context, delta int, cn *Conn, poolName, notificationType string) + + // Global metric callback for connection handoff + // Parameters: ctx, cn, poolName + metricConnectionHandoffCallback func(ctx context.Context, cn *Conn, poolName string) + + // Global metric callback for error tracking + // Parameters: ctx, errorType, cn, statusCode, isInternal, retryAttempts + metricErrorCallback func(ctx context.Context, errorType string, cn *Conn, statusCode string, isInternal bool, retryAttempts int) + + // Global metric callback for maintenance notifications + // Parameters: ctx, cn, notificationType + metricMaintenanceNotificationCallback func(ctx context.Context, cn *Conn, notificationType string) + + // Global metric callback for connection wait time + // Parameters: ctx, duration, cn + metricConnectionWaitTimeCallback func(ctx context.Context, duration time.Duration, cn *Conn) + + // Global metric callback for connection timeouts + // Parameters: ctx, cn, timeoutType + metricConnectionTimeoutCallback func(ctx context.Context, cn *Conn, timeoutType string) + + // Global metric callback for connection closed + // Parameters: ctx, cn, reason, err + metricConnectionClosedCallback func(ctx context.Context, cn *Conn, reason string, err error) + + // errPanicInDial is returned when a panic occurs in the dial function. 
+ errPanicInQueuedNewConn = errors.New("panic in queuedNewConn") + + // popAttempts is the maximum number of attempts to find a usable connection + // when popping from the idle connection pool. This handles cases where connections + // are temporarily marked as unusable (e.g., during maintenanceNotifications upgrades or network issues). + // Value of 50 provides sufficient resilience without excessive overhead. + // This is capped by the idle connection count, so we won't loop excessively. + popAttempts = 50 + + // getAttempts is the maximum number of attempts to get a connection that passes + // hook validation (e.g., maintenanceNotifications upgrade hooks). This protects against race conditions + // where hooks might temporarily reject connections during cluster transitions. + // Value of 3 balances resilience with performance - most hook rejections resolve quickly. + getAttempts = 3 + + minTime = time.Unix(-2208988800, 0) // Jan 1, 1900 + maxTime = minTime.Add(1<<63 - 1) + noExpiration = maxTime ) -var timers = sync.Pool{ - New: func() interface{} { - t := time.NewTimer(time.Hour) - t.Stop() - return t - }, +// MetricCallbacks holds all metric callback functions. +// Use SetAllMetricCallbacks to register all callbacks atomically. 
+type MetricCallbacks struct { + // ConnectionCreateTime is called when a new connection is created + ConnectionCreateTime func(ctx context.Context, duration time.Duration, cn *Conn) + + // ConnectionRelaxedTimeout is called when connection timeout is relaxed/unrelaxed + // delta: +1 for relaxed, -1 for unrelaxed + ConnectionRelaxedTimeout func(ctx context.Context, delta int, cn *Conn, poolName, notificationType string) + + // ConnectionHandoff is called when a connection is handed off to another node + ConnectionHandoff func(ctx context.Context, cn *Conn, poolName string) + + // Error is called when an error occurs + Error func(ctx context.Context, errorType string, cn *Conn, statusCode string, isInternal bool, retryAttempts int) + + // MaintenanceNotification is called when a maintenance notification is received + MaintenanceNotification func(ctx context.Context, cn *Conn, notificationType string) + + // ConnectionWaitTime is called to record time spent waiting for a connection + ConnectionWaitTime func(ctx context.Context, duration time.Duration, cn *Conn) + + // ConnectionClosed is called when a connection is closed + ConnectionClosed func(ctx context.Context, cn *Conn, reason string, err error) +} + +// SetAllMetricCallbacks sets all metric callbacks atomically. +// Pass nil to clear all callbacks (disable metrics). +// This ensures all callbacks are set together under a single lock, +// preventing inconsistent state during registration. +// +// Note on thread safety: After returning, there is a small window where +// concurrent getMetric* calls may return the old callback value. This is +// acceptable for metrics - at most one event may go to the old recorder +// or be missed during the transition. The callbacks themselves are immutable +// function pointers, so calling an "old" callback is safe. 
+func SetAllMetricCallbacks(callbacks *MetricCallbacks) { + metricCallbackMu.Lock() + defer metricCallbackMu.Unlock() + + if callbacks == nil { + metricConnectionCreateTimeCallback = nil + metricConnectionRelaxedTimeoutCallback = nil + metricConnectionHandoffCallback = nil + metricErrorCallback = nil + metricMaintenanceNotificationCallback = nil + metricConnectionWaitTimeCallback = nil + metricConnectionClosedCallback = nil + return + } + + metricConnectionCreateTimeCallback = callbacks.ConnectionCreateTime + metricConnectionRelaxedTimeoutCallback = callbacks.ConnectionRelaxedTimeout + metricConnectionHandoffCallback = callbacks.ConnectionHandoff + metricErrorCallback = callbacks.Error + metricMaintenanceNotificationCallback = callbacks.MaintenanceNotification + metricConnectionWaitTimeCallback = callbacks.ConnectionWaitTime + metricConnectionClosedCallback = callbacks.ConnectionClosed +} + +// getMetricConnectionStateChangeCallback returns the metric callback for connection state changes. +func getMetricConnectionStateChangeCallback() func(ctx context.Context, cn *Conn, fromState, toState string) { + metricCallbackMu.RLock() + cb := metricConnectionStateChangeCallback + metricCallbackMu.RUnlock() + return cb +} + +// GetMetricConnectionCreateTimeCallback returns the metric callback for connection creation time. +func GetMetricConnectionCreateTimeCallback() func(ctx context.Context, duration time.Duration, cn *Conn) { + metricCallbackMu.RLock() + cb := metricConnectionCreateTimeCallback + metricCallbackMu.RUnlock() + return cb +} + +// GetMetricConnectionRelaxedTimeoutCallback returns the metric callback for connection relaxed timeout changes. +// This is used by maintnotifications to record relaxed timeout metrics. 
+func GetMetricConnectionRelaxedTimeoutCallback() func(ctx context.Context, delta int, cn *Conn, poolName, notificationType string) { + metricCallbackMu.RLock() + cb := metricConnectionRelaxedTimeoutCallback + metricCallbackMu.RUnlock() + return cb +} + +// GetMetricConnectionHandoffCallback returns the metric callback for connection handoffs. +// This is used by maintnotifications to record handoff metrics. +func GetMetricConnectionHandoffCallback() func(ctx context.Context, cn *Conn, poolName string) { + metricCallbackMu.RLock() + cb := metricConnectionHandoffCallback + metricCallbackMu.RUnlock() + return cb +} + +// GetMetricErrorCallback returns the metric callback for error tracking. +// This is used by cluster and client code to record error metrics. +func GetMetricErrorCallback() func(ctx context.Context, errorType string, cn *Conn, statusCode string, isInternal bool, retryAttempts int) { + metricCallbackMu.RLock() + cb := metricErrorCallback + metricCallbackMu.RUnlock() + return cb +} + +// GetMetricMaintenanceNotificationCallback returns the metric callback for maintenance notifications. +// This is used by maintnotifications to record notification metrics. 
+func GetMetricMaintenanceNotificationCallback() func(ctx context.Context, cn *Conn, notificationType string) { + metricCallbackMu.RLock() + cb := metricMaintenanceNotificationCallback + metricCallbackMu.RUnlock() + return cb +} + +func getMetricConnectionWaitTimeCallback() func(ctx context.Context, duration time.Duration, cn *Conn) { + metricCallbackMu.RLock() + cb := metricConnectionWaitTimeCallback + metricCallbackMu.RUnlock() + return cb +} + +func getMetricConnectionTimeoutCallback() func(ctx context.Context, cn *Conn, timeoutType string) { + metricCallbackMu.RLock() + cb := metricConnectionTimeoutCallback + metricCallbackMu.RUnlock() + return cb +} + +func getMetricConnectionClosedCallback() func(ctx context.Context, cn *Conn, reason string, err error) { + metricCallbackMu.RLock() + cb := metricConnectionClosedCallback + metricCallbackMu.RUnlock() + return cb } // Stats contains pool state information and accumulated stats. type Stats struct { - Hits uint32 // number of times free connection was found in the pool - Misses uint32 // number of times free connection was NOT found in the pool - Timeouts uint32 // number of times a wait timeout occurred + Hits uint32 // number of times free connection was found in the pool + Misses uint32 // number of times free connection was NOT found in the pool + Timeouts uint32 // number of times a wait timeout occurred + WaitCount uint32 // number of times a connection was waited + Unusable uint32 // number of times a connection was found to be unusable + WaitDurationNs int64 // total time spent for waiting a connection in nanoseconds - TotalConns uint32 // number of total connections in the pool - IdleConns uint32 // number of idle connections in the pool - StaleConns uint32 // number of stale connections removed from the pool + TotalConns uint32 // number of total connections in the pool + IdleConns uint32 // number of idle connections in the pool + StaleConns uint32 // number of stale connections removed from the pool + 
PendingRequests uint32 // number of pending requests waiting for a connection + + PubSubStats PubSubStats } type Pooler interface { @@ -54,20 +252,51 @@ type Pooler interface { IdleLen() int Stats() *Stats + // Size returns the maximum pool size (capacity). + // This is used by the streaming credentials manager to size the re-auth worker pool. + Size() int + + AddPoolHook(hook PoolHook) + RemovePoolHook(hook PoolHook) + + // RemoveWithoutTurn removes a connection from the pool without freeing a turn. + // This should be used when removing a connection from a context that didn't acquire + // a turn via Get() (e.g., background workers, cleanup tasks). + // For normal removal after Get(), use Remove() instead. + RemoveWithoutTurn(context.Context, *Conn, error) + Close() error } type Options struct { - Dialer func(context.Context) (net.Conn, error) + Dialer func(context.Context) (net.Conn, error) + ReadBufferSize int + WriteBufferSize int - PoolFIFO bool - PoolSize int - PoolTimeout time.Duration - MinIdleConns int - MaxIdleConns int - MaxActiveConns int - ConnMaxIdleTime time.Duration - ConnMaxLifetime time.Duration + PoolFIFO bool + PoolSize int32 + MaxConcurrentDials int + DialTimeout time.Duration + PoolTimeout time.Duration + MinIdleConns int32 + MaxIdleConns int32 + MaxActiveConns int32 + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration + ConnMaxLifetimeJitter time.Duration + PushNotificationsEnabled bool + + // DialerRetries is the maximum number of retry attempts when dialing fails. + // Default: 5 + DialerRetries int + + // DialerRetryTimeout is the backoff duration between retry attempts. + // Default: 100ms + DialerRetryTimeout time.Duration + + // Name is a unique identifier for this pool, used in metrics. 
+ // Format: addr_uniqueID (e.g., "localhost:6379_a1b2c3d4") + Name string } type lastDialErrorWrap struct { @@ -80,71 +309,165 @@ type ConnPool struct { dialErrorsNum uint32 // atomic lastDialError atomic.Value - queue chan struct{} + queue chan struct{} + dialsInProgress chan struct{} + dialsQueue *wantConnQueue + // Fast semaphore for connection limiting with eventual fairness + // Uses fast path optimization to avoid timer allocation when tokens are available + semaphore *internal.FastSemaphore connsMu sync.Mutex - conns []*Conn + conns map[uint64]*Conn idleConns []*Conn - poolSize int - idleConnsLen int + poolSize atomic.Int32 + idleConnsLen atomic.Int32 + idleCheckInProgress atomic.Bool + idleCheckNeeded atomic.Bool - stats Stats + stats Stats + waitDurationNs atomic.Int64 _closed uint32 // atomic + + // Pool hooks manager for flexible connection processing + // Using atomic.Pointer for lock-free reads in hot paths (Get/Put) + hookManager atomic.Pointer[PoolHookManager] } var _ Pooler = (*ConnPool)(nil) func NewConnPool(opt *Options) *ConnPool { p := &ConnPool{ - cfg: opt, - - queue: make(chan struct{}, opt.PoolSize), - conns: make([]*Conn, 0, opt.PoolSize), - idleConns: make([]*Conn, 0, opt.PoolSize), + cfg: opt, + semaphore: internal.NewFastSemaphore(opt.PoolSize), + queue: make(chan struct{}, opt.PoolSize), + conns: make(map[uint64]*Conn), + dialsInProgress: make(chan struct{}, opt.MaxConcurrentDials), + dialsQueue: newWantConnQueue(), + idleConns: make([]*Conn, 0, opt.PoolSize), } - p.connsMu.Lock() - p.checkMinIdleConns() - p.connsMu.Unlock() + // Only create MinIdleConns if explicitly requested (> 0) + // This avoids creating connections during pool initialization for tests + if opt.MinIdleConns > 0 { + p.connsMu.Lock() + p.checkMinIdleConns() + p.connsMu.Unlock() + } return p } +// initializeHooks sets up the pool hooks system. 
+func (p *ConnPool) initializeHooks() { + manager := NewPoolHookManager() + p.hookManager.Store(manager) +} + +// AddPoolHook adds a pool hook to the pool. +func (p *ConnPool) AddPoolHook(hook PoolHook) { + // Lock-free read of current manager + manager := p.hookManager.Load() + if manager == nil { + p.initializeHooks() + manager = p.hookManager.Load() + } + + // Create new manager with added hook + newManager := manager.Clone() + newManager.AddHook(hook) + + // Atomically swap to new manager + p.hookManager.Store(newManager) +} + +// RemovePoolHook removes a pool hook from the pool. +func (p *ConnPool) RemovePoolHook(hook PoolHook) { + manager := p.hookManager.Load() + if manager != nil { + // Create new manager with removed hook + newManager := manager.Clone() + newManager.RemoveHook(hook) + + // Atomically swap to new manager + p.hookManager.Store(newManager) + } +} + func (p *ConnPool) checkMinIdleConns() { - if p.cfg.MinIdleConns == 0 { + // If a check is already in progress, mark that we need another check and return + if !p.idleCheckInProgress.CompareAndSwap(false, true) { + p.idleCheckNeeded.Store(true) return } - for p.poolSize < p.cfg.PoolSize && p.idleConnsLen < p.cfg.MinIdleConns { - select { - case p.queue <- struct{}{}: - p.poolSize++ - p.idleConnsLen++ + if p.cfg.MinIdleConns == 0 { + p.idleCheckInProgress.Store(false) + return + } + + // Keep checking until no more checks are needed + // This handles the case where multiple Remove() calls happen concurrently + for { + // Clear the "check needed" flag before we start + p.idleCheckNeeded.Store(false) + + // Only create idle connections if we haven't reached the total pool size limit + // MinIdleConns should be a subset of PoolSize, not additional connections + for p.poolSize.Load() < p.cfg.PoolSize && p.idleConnsLen.Load() < p.cfg.MinIdleConns { + // Try to acquire a semaphore token + if !p.semaphore.TryAcquire() { + // Semaphore is full, can't create more connections right now + // Break out of inner 
loop to check if we need to retry + break + } + + p.poolSize.Add(1) + p.idleConnsLen.Add(1) go func() { + defer func() { + if err := recover(); err != nil { + p.poolSize.Add(-1) + p.idleConnsLen.Add(-1) + + p.freeTurn() + internal.Logger.Printf(context.Background(), "addIdleConn panic: %+v", err) + } + }() + err := p.addIdleConn() if err != nil && err != ErrClosed { - p.connsMu.Lock() - p.poolSize-- - p.idleConnsLen-- - p.connsMu.Unlock() + p.poolSize.Add(-1) + p.idleConnsLen.Add(-1) } - p.freeTurn() }() - default: + } + + // If no one requested another check while we were working, we're done + if !p.idleCheckNeeded.Load() { + p.idleCheckInProgress.Store(false) return } + + // Otherwise, loop again to handle the new requests } } func (p *ConnPool) addIdleConn() error { - cn, err := p.dialConn(context.TODO(), true) + ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + defer cancel() + + cn, err := p.dialConn(ctx, true) if err != nil { return err } + // NOTE: Connection is in CREATED state and will be initialized by redis.go:initConn() + // when first acquired from the pool. Do NOT transition to IDLE here - that happens + // after initialization completes. + p.connsMu.Lock() defer p.connsMu.Unlock() @@ -154,11 +477,15 @@ func (p *ConnPool) addIdleConn() error { return ErrClosed } - p.conns = append(p.conns, cn) + p.conns[cn.GetID()] = cn p.idleConns = append(p.idleConns, cn) return nil } +// NewConn creates a new connection and returns it to the user. +// This will still obey MaxActiveConns but will not include it in the pool and won't increase the pool size. +// +// NOTE: If you directly get a connection from the pool, it won't be pooled and won't support maintnotifications upgrades. 
func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) { return p.newConn(ctx, false) } @@ -168,28 +495,59 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) { return nil, ErrClosed } - if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns { + if p.cfg.MaxActiveConns > 0 && p.poolSize.Load() >= p.cfg.MaxActiveConns { return nil, ErrPoolExhausted } - cn, err := p.dialConn(ctx, pooled) + // Protect against nil context due to race condition in queuedNewConn + // where the context can be set to nil after timeout/cancellation + if ctx == nil { + ctx = context.Background() + } + + dialCtx, cancel := context.WithTimeout(ctx, p.cfg.DialTimeout) + defer cancel() + cn, err := p.dialConn(dialCtx, pooled) if err != nil { return nil, err } + // NOTE: Connection is in CREATED state and will be initialized by redis.go:initConn() + // when first used. Do NOT transition to IDLE here - that happens after initialization completes. + // The state machine flow is: CREATED → INITIALIZING (in initConn) → IDLE (after init success) + + if p.cfg.MaxActiveConns > 0 && p.poolSize.Load() > p.cfg.MaxActiveConns { + _ = cn.Close() + return nil, ErrPoolExhausted + } + p.connsMu.Lock() defer p.connsMu.Unlock() + if p.closed() { + _ = cn.Close() + return nil, ErrClosed + } + // Check if pool was closed while we were waiting for the lock + if p.conns == nil { + p.conns = make(map[uint64]*Conn) + } + p.conns[cn.GetID()] = cn - p.conns = append(p.conns, cn) if pooled { // If pool is full remove the cn on next Put. 
- if p.poolSize >= p.cfg.PoolSize { + currentPoolSize := p.poolSize.Load() + if currentPoolSize >= p.cfg.PoolSize { cn.pooled = false } else { - p.poolSize++ + p.poolSize.Add(1) } } + // Notify metrics: new connection created and idle + if cb := getMetricConnectionStateChangeCallback(); cb != nil { + cb(ctx, cn, "", "idle") + } + return cn, nil } @@ -202,18 +560,86 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) { return nil, p.getLastDialError() } - netConn, err := p.cfg.Dialer(ctx) - if err != nil { - p.setLastDialError(err) - if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) { - go p.tryDial() - } - return nil, err + // Record dial start time for connection creation metric + // This will be used after handshake completes in redis.go _getConn() + // Only call time.Now() if callback is registered to avoid overhead + var dialStartNs int64 + if GetMetricConnectionCreateTimeCallback() != nil { + dialStartNs = time.Now().UnixNano() } - cn := NewConn(netConn) - cn.pooled = pooled - return cn, nil + // Retry dialing with backoff + // the context timeout is already handled by the context passed in + // so we may never reach the max retries, higher values don't hurt + maxRetries := p.cfg.DialerRetries + if maxRetries <= 0 { + maxRetries = 5 // Default value + } + backoffDuration := p.cfg.DialerRetryTimeout + if backoffDuration <= 0 { + backoffDuration = 100 * time.Millisecond // Default value + } + + var lastErr error + shouldLoop := true + // when the timeout is reached, we should stop retrying + // but keep the lastErr to return to the caller + // instead of a generic context deadline exceeded error + attempt := 0 + for attempt = 0; (attempt < maxRetries) && shouldLoop; attempt++ { + netConn, err := p.cfg.Dialer(ctx) + if err != nil { + lastErr = err + // Add backoff delay for retry attempts + // (not for the first attempt, do at least one) + select { + case <-ctx.Done(): + shouldLoop = false + case 
<-time.After(backoffDuration): + // Continue with retry + } + continue + } + + cn := NewConnWithBufferSize(netConn, p.cfg.ReadBufferSize, p.cfg.WriteBufferSize) + cn.pooled = pooled + // Store dial start time only if we recorded it + if dialStartNs > 0 { + cn.dialStartNs.Store(dialStartNs) + } + cn.expiresAt = p.calcConnExpiresAt() + // Set pool name for metrics + cn.SetPoolName(p.cfg.Name) + + return cn, nil + } + + internal.Logger.Printf(ctx, "redis: connection pool: failed to dial after %d attempts: %v", attempt, lastErr) + // All retries failed - handle error tracking + p.setLastDialError(lastErr) + if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) { + go p.tryDial() + } + return nil, lastErr +} + +// calcConnExpiresAt calculates the expiration time for a connection. +// It applies random jitter to prevent all connections from expiring simultaneously, +// avoiding the "thundering herd" problem where all connections expire at once. +// Returns noExpiration if ConnMaxLifetime is not set. 
+func (p *ConnPool) calcConnExpiresAt() time.Time { + if p.cfg.ConnMaxLifetime <= 0 { + return noExpiration + } + + if p.cfg.ConnMaxLifetimeJitter <= 0 { + return time.Now().Add(p.cfg.ConnMaxLifetime) + } + + jitter := p.cfg.ConnMaxLifetimeJitter + jitterRange := jitter.Nanoseconds() * 2 + jitterNs := rand.Int63n(jitterRange) - jitter.Nanoseconds() + return time.Now().Add(p.cfg.ConnMaxLifetime + time.Duration(jitterNs)) } func (p *ConnPool) tryDial() { @@ -222,15 +648,19 @@ func (p *ConnPool) tryDial() { return } - conn, err := p.cfg.Dialer(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + + conn, err := p.cfg.Dialer(ctx) if err != nil { p.setLastDialError(err) time.Sleep(time.Second) + cancel() continue } atomic.StoreUint32(&p.dialErrorsNum, 0) _ = conn.Close() + cancel() return } } @@ -249,17 +679,59 @@ func (p *ConnPool) getLastDialError() error { // Get returns existed connection from the pool or creates a new one. func (p *ConnPool) Get(ctx context.Context) (*Conn, error) { + return p.getConn(ctx) +} + +// getConn returns a connection from the pool. +func (p *ConnPool) getConn(ctx context.Context) (cn *Conn, err error) { if p.closed() { return nil, ErrClosed } - if err := p.waitTurn(ctx); err != nil { + // Track pending requests in pool stats + // NOTE: We only track in stats, not via callback. The AsyncGauge reads stats directly. 
+ atomic.AddUint32(&p.stats.PendingRequests, 1) + defer func() { + if err != nil { + // Failed to get connection, decrement pending requests + atomic.AddUint32(&p.stats.PendingRequests, ^uint32(0)) // -1 + } + }() + + // Track wait time - only call time.Now() if callback is registered + var waitStart time.Time + waitTimeCallback := getMetricConnectionWaitTimeCallback() + if waitTimeCallback != nil { + waitStart = time.Now() + } + if err = p.waitTurn(ctx); err != nil { + // Record timeout if applicable + if err == ErrPoolTimeout { + if cb := getMetricConnectionTimeoutCallback(); cb != nil { + cb(ctx, nil, "pool") + } + // Record general error metric for pool timeout + if cb := GetMetricErrorCallback(); cb != nil { + cb(ctx, "POOL_TIMEOUT", nil, "POOL_TIMEOUT", true, 0) + } + } return nil, err } + var waitDuration time.Duration + if waitTimeCallback != nil { + waitDuration = time.Since(waitStart) + } + + // Use cached time for health checks (max 50ms staleness is acceptable) + nowNs := getCachedTimeNs() + + // Lock-free atomic read - no mutex overhead! 
+ hookManager := p.hookManager.Load() + + for attempts := 0; attempts < getAttempts; attempts++ { - for { p.connsMu.Lock() - cn, err := p.popIdle() + cn, err = p.popIdle() p.connsMu.Unlock() if err != nil { @@ -271,127 +743,472 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) { break } - if !p.isHealthyConn(cn) { + if !p.isHealthyConn(cn, nowNs) { _ = p.CloseConn(cn) continue } + // Process connection using the hooks system + // Combine error and rejection checks to reduce branches + if hookManager != nil { + acceptConn, hookErr := hookManager.ProcessOnGet(ctx, cn, false) + if hookErr != nil || !acceptConn { + if hookErr != nil { + internal.Logger.Printf(ctx, "redis: connection pool: failed to process idle connection by hook: %v", hookErr) + _ = p.CloseConn(cn) + } else { + internal.Logger.Printf(ctx, "redis: connection pool: conn[%d] rejected by hook, returning to pool", cn.GetID()) + // Return connection to pool without freeing the turn that this Get() call holds. + // We use putConnWithoutTurn() to run all the Put hooks and logic without freeing a turn. + p.putConnWithoutTurn(ctx, cn) + cn = nil + } + continue + } + } + atomic.AddUint32(&p.stats.Hits, 1) + + // Notify metrics: connection moved from idle to used + if cb := getMetricConnectionStateChangeCallback(); cb != nil { + cb(ctx, cn, "idle", "used") + } + + // Record wait time (use cached callback from above) + if waitTimeCallback != nil { + waitTimeCallback(ctx, waitDuration, cn) + } + + // Decrement pending requests (connection acquired successfully) + // NOTE: We only track in stats, not via callback. The AsyncGauge reads stats directly. 
+ atomic.AddUint32(&p.stats.PendingRequests, ^uint32(0)) // -1 + return cn, nil } atomic.AddUint32(&p.stats.Misses, 1) - newcn, err := p.newConn(ctx, true) + var newcn *Conn + newcn, err = p.queuedNewConn(ctx) if err != nil { - p.freeTurn() return nil, err } + // Process connection using the hooks system + // This includes the handshake (HELLO/AUTH) via initConn hook + if hookManager != nil { + var acceptConn bool + acceptConn, err = hookManager.ProcessOnGet(ctx, newcn, true) + // both errors and accept=false mean a hook rejected the connection + // this should not happen with a new connection, but we handle it gracefully + if err != nil || !acceptConn { + // Failed to process connection, discard it + internal.Logger.Printf(ctx, "redis: connection pool: failed to process new connection conn[%d] by hook: accept=%v, err=%v", newcn.GetID(), acceptConn, err) + _ = p.CloseConn(newcn) + return nil, err + } + } + + // Notify metrics: new connection is created and used + if cb := getMetricConnectionStateChangeCallback(); cb != nil { + cb(ctx, newcn, "", "used") + } + + // Record wait time (use cached callback from above) + if waitTimeCallback != nil { + waitTimeCallback(ctx, waitDuration, newcn) + } + + // Decrement pending requests (connection acquired successfully) + // NOTE: We only track in stats, not via callback. The AsyncGauge reads stats directly. 
+ atomic.AddUint32(&p.stats.PendingRequests, ^uint32(0)) // -1 + return newcn, nil } -func (p *ConnPool) waitTurn(ctx context.Context) error { +func (p *ConnPool) queuedNewConn(ctx context.Context) (*Conn, error) { select { + case p.dialsInProgress <- struct{}{}: + // Got permission, proceed to create connection case <-ctx.Done(): - return ctx.Err() - default: + p.freeTurn() + return nil, ctx.Err() } - select { - case p.queue <- struct{}{}: - return nil - default: - } + dialCtx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) - timer := timers.Get().(*time.Timer) - timer.Reset(p.cfg.PoolTimeout) + w := &wantConn{ + ctx: dialCtx, + cancelCtx: cancel, + result: make(chan wantConnResult, 1), + } + var err error + defer func() { + if err != nil { + if cn := w.cancel(); cn != nil && p.putIdleConn(ctx, cn) { + p.freeTurn() + } + } + }() + + p.dialsQueue.discardDoneAtFront() + p.dialsQueue.enqueue(w) + + go func(w *wantConn) { + var freeTurnCalled bool + defer func() { + if err := recover(); err != nil { + w.tryDeliver(nil, errPanicInQueuedNewConn) + p.dialsQueue.discardDoneAtFront() + if !freeTurnCalled { + p.freeTurn() + } + internal.Logger.Printf(context.Background(), "queuedNewConn panic: %+v", err) + } + }() + + defer w.cancelCtx() + defer func() { <-p.dialsInProgress }() // Release connection creation permission + + dialCtx := w.getCtxForDial() + cn, cnErr := p.newConn(dialCtx, true) + if cnErr != nil { + w.tryDeliver(nil, cnErr) // deliver error to caller, notify connection creation failed + p.dialsQueue.discardDoneAtFront() + p.freeTurn() + freeTurnCalled = true + return + } + + delivered := w.tryDeliver(cn, cnErr) + p.dialsQueue.discardDoneAtFront() + if !delivered && p.putIdleConn(dialCtx, cn) { + p.freeTurn() + freeTurnCalled = true + } + }(w) select { case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } - timers.Put(timer) - return ctx.Err() - case p.queue <- struct{}{}: - if !timer.Stop() { - <-timer.C - } - timers.Put(timer) - 
return nil - case <-timer.C: - timers.Put(timer) - atomic.AddUint32(&p.stats.Timeouts, 1) - return ErrPoolTimeout + err = ctx.Err() + return nil, err + case result := <-w.result: + err = result.err + return result.cn, err } } +// putIdleConn puts a connection back to the pool or passes it to the next waiting request. +// +// It returns true if the connection was put back to the pool, +// which means the turn needs to be freed directly by the caller, +// or false if the connection was passed to the next waiting request, +// which means the turn will be freed by the waiting goroutine after it returns. +func (p *ConnPool) putIdleConn(ctx context.Context, cn *Conn) bool { + for { + w, ok := p.dialsQueue.dequeue() + if !ok { + break + } + if w.tryDeliver(cn, nil) { + return false + } + } + + p.connsMu.Lock() + defer p.connsMu.Unlock() + + if p.closed() { + _ = cn.Close() + return true + } + + // poolSize is increased in newConn + p.idleConns = append(p.idleConns, cn) + p.idleConnsLen.Add(1) + + return true +} + +func (p *ConnPool) waitTurn(ctx context.Context) error { + // Fast path: check context first + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Fast path: try to acquire without blocking + if p.semaphore.TryAcquire() { + return nil + } + + // Slow path: need to wait + start := time.Now() + err := p.semaphore.Acquire(ctx, p.cfg.PoolTimeout, ErrPoolTimeout) + + switch err { + case nil: + // Successfully acquired after waiting + p.waitDurationNs.Add(time.Now().UnixNano() - start.UnixNano()) + atomic.AddUint32(&p.stats.WaitCount, 1) + case ErrPoolTimeout: + atomic.AddUint32(&p.stats.Timeouts, 1) + } + + return err +} + func (p *ConnPool) freeTurn() { - <-p.queue + p.semaphore.Release() } func (p *ConnPool) popIdle() (*Conn, error) { if p.closed() { return nil, ErrClosed } + defer p.checkMinIdleConns() + n := len(p.idleConns) if n == 0 { return nil, nil } var cn *Conn - if p.cfg.PoolFIFO { - cn = p.idleConns[0] - copy(p.idleConns, 
p.idleConns[1:]) - p.idleConns = p.idleConns[:n-1] - } else { - idx := n - 1 - cn = p.idleConns[idx] - p.idleConns = p.idleConns[:idx] + attempts := 0 + + maxAttempts := min(popAttempts, n) + for attempts < maxAttempts { + if len(p.idleConns) == 0 { + return nil, nil + } + + if p.cfg.PoolFIFO { + cn = p.idleConns[0] + copy(p.idleConns, p.idleConns[1:]) + p.idleConns = p.idleConns[:len(p.idleConns)-1] + } else { + idx := len(p.idleConns) - 1 + cn = p.idleConns[idx] + p.idleConns = p.idleConns[:idx] + } + attempts++ + + // Hot path optimization: try IDLE → IN_USE or CREATED → IN_USE transition + // Using inline TryAcquire() method for better performance (avoids pointer dereference) + if cn.TryAcquire() { + // Successfully acquired the connection + p.idleConnsLen.Add(-1) + break + } + + // Connection is in UNUSABLE, INITIALIZING, or other state - skip it + + // Connection is not in a valid state (might be UNUSABLE for handoff/re-auth, INITIALIZING, etc.) + // Put it back in the pool and try the next one + if p.cfg.PoolFIFO { + // FIFO: put at end (will be picked up last since we pop from front) + p.idleConns = append(p.idleConns, cn) + } else { + // LIFO: put at beginning (will be picked up last since we pop from end) + p.idleConns = append([]*Conn{cn}, p.idleConns...) 
+ } + cn = nil } - p.idleConnsLen-- - p.checkMinIdleConns() + + // If we exhausted all attempts without finding a usable connection, return nil + if attempts > 1 && attempts >= maxAttempts && int32(attempts) >= p.poolSize.Load() { + internal.Logger.Printf(context.Background(), "redis: connection pool: failed to get a usable connection after %d attempts", attempts) + return nil, nil + } + return cn, nil } func (p *ConnPool) Put(ctx context.Context, cn *Conn) { - if cn.rd.Buffered() > 0 { - internal.Logger.Printf(ctx, "Conn has unread data") - p.Remove(ctx, cn, BadConnError{}) + p.putConn(ctx, cn, true) +} + +// putConnWithoutTurn is an internal method that puts a connection back to the pool +// without freeing a turn. This is used when returning a rejected connection from +// within Get(), where the turn is still held by the Get() call. +func (p *ConnPool) putConnWithoutTurn(ctx context.Context, cn *Conn) { + p.putConn(ctx, cn, false) +} + +// putConn is the internal implementation of Put that optionally frees a turn. +func (p *ConnPool) putConn(ctx context.Context, cn *Conn, freeTurn bool) { + // Guard against nil connection + if cn == nil { + internal.Logger.Printf(ctx, "putConn called with nil connection") + if freeTurn { + p.freeTurn() + } + return + } + + // Process connection using the hooks system + shouldPool := true + shouldRemove := false + var err error + + if cn.HasBufferedData() { + // Peek at the reply type to check if it's a push notification + if replyType, err := cn.PeekReplyTypeSafe(); err != nil || replyType != proto.RespPush { + // Not a push notification or error peeking, remove connection + internal.Logger.Printf(ctx, "Conn has unread data (not push notification), removing it") + p.removeConnInternal(ctx, cn, err, freeTurn) + return + } + // It's a push notification, allow pooling (client will handle it) + } + + // Lock-free atomic read - no mutex overhead! 
+ hookManager := p.hookManager.Load() + + if hookManager != nil { + shouldPool, shouldRemove, err = hookManager.ProcessOnPut(ctx, cn) + if err != nil { + internal.Logger.Printf(ctx, "Connection hook error: %v", err) + p.removeConnInternal(ctx, cn, err, freeTurn) + return + } + } + + // Combine all removal checks into one - reduces branches + if shouldRemove || !shouldPool { + p.removeConnInternal(ctx, cn, errHookRequestedRemoval, freeTurn) return } if !cn.pooled { - p.Remove(ctx, cn, nil) + p.removeConnInternal(ctx, cn, errConnNotPooled, freeTurn) return } var shouldCloseConn bool - p.connsMu.Lock() + if p.cfg.MaxIdleConns == 0 || p.idleConnsLen.Load() < p.cfg.MaxIdleConns { + // Hot path optimization: try fast IN_USE → IDLE transition + // Using inline Release() method for better performance (avoids pointer dereference) + transitionedToIdle := cn.Release() - if p.cfg.MaxIdleConns == 0 || p.idleConnsLen < p.cfg.MaxIdleConns { - p.idleConns = append(p.idleConns, cn) - p.idleConnsLen++ + // Handle unexpected state changes + if !transitionedToIdle { + // Fast path failed - hook might have changed state (e.g., to UNUSABLE for handoff) + // Keep the state set by the hook and pool the connection anyway + sm := cn.GetStateMachine() + if sm == nil { + // State machine is nil - connection is in an invalid state, remove it + internal.Logger.Printf(ctx, "conn[%d] has nil state machine, removing it", cn.GetID()) + p.removeConnInternal(ctx, cn, errConnNotPooled, freeTurn) + return + } + currentState := sm.GetState() + switch currentState { + case StateUnusable: + // expected state, don't log it + case StateClosed: + internal.Logger.Printf(ctx, "Unexpected conn[%d] state changed by hook to %v, closing it", cn.GetID(), currentState) + shouldCloseConn = true + p.removeConnWithLock(cn) + default: + // Pool as-is + internal.Logger.Printf(ctx, "Unexpected conn[%d] state changed by hook to %v, pooling as-is", cn.GetID(), currentState) + } + } + + // unusable conns are expected to 
become usable at some point (background process is reconnecting them) + // put them at the opposite end of the queue + // Optimization: if we just transitioned to IDLE, we know it's usable - skip the check + if !transitionedToIdle && !cn.IsUsable() { + if p.cfg.PoolFIFO { + p.connsMu.Lock() + p.idleConns = append(p.idleConns, cn) + p.connsMu.Unlock() + } else { + p.connsMu.Lock() + p.idleConns = append([]*Conn{cn}, p.idleConns...) + p.connsMu.Unlock() + } + p.idleConnsLen.Add(1) + } else if !shouldCloseConn { + p.connsMu.Lock() + p.idleConns = append(p.idleConns, cn) + p.connsMu.Unlock() + p.idleConnsLen.Add(1) + } + + // Notify metrics: connection moved from used to idle + if cb := getMetricConnectionStateChangeCallback(); cb != nil { + cb(ctx, cn, "used", "idle") + } } else { - p.removeConn(cn) shouldCloseConn = true + p.removeConnWithLock(cn) + + // Notify metrics: connection removed (used -> nothing) + if cb := getMetricConnectionStateChangeCallback(); cb != nil { + cb(ctx, cn, "used", "") + } } - p.connsMu.Unlock() - - p.freeTurn() + if freeTurn { + p.freeTurn() + } if shouldCloseConn { _ = p.closeConn(cn) } + + cn.SetLastPutAtNs(getCachedTimeNs()) } -func (p *ConnPool) Remove(_ context.Context, cn *Conn, reason error) { +func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) { + p.removeConnInternal(ctx, cn, reason, true) +} + +// RemoveWithoutTurn removes a connection from the pool without freeing a turn. +// This should be used when removing a connection from a context that didn't acquire +// a turn via Get() (e.g., background workers, cleanup tasks). +// For normal removal after Get(), use Remove() instead. +func (p *ConnPool) RemoveWithoutTurn(ctx context.Context, cn *Conn, reason error) { + p.removeConnInternal(ctx, cn, reason, false) +} + +// removeConnInternal is the internal implementation of Remove that optionally frees a turn. 
+func (p *ConnPool) removeConnInternal(ctx context.Context, cn *Conn, reason error, freeTurn bool) { + // Lock-free atomic read - no mutex overhead! + hookManager := p.hookManager.Load() + + if hookManager != nil { + hookManager.ProcessOnRemove(ctx, cn, reason) + } + p.removeConnWithLock(cn) - p.freeTurn() + + if freeTurn { + p.freeTurn() + } + + // Notify metrics: connection removed (assume from used state) + if cb := getMetricConnectionStateChangeCallback(); cb != nil { + cb(ctx, cn, "used", "") + } + + // Record connection closed + if cb := getMetricConnectionClosedCallback(); cb != nil { + reasonStr := "unknown" + if reason != nil { + reasonStr = reason.Error() + } + cb(ctx, cn, reasonStr, reason) + } + _ = p.closeConn(cn) + + // Check if we need to create new idle connections to maintain MinIdleConns + p.checkMinIdleConns() } func (p *ConnPool) CloseConn(cn *Conn) error { @@ -406,17 +1223,22 @@ func (p *ConnPool) removeConnWithLock(cn *Conn) { } func (p *ConnPool) removeConn(cn *Conn) { - for i, c := range p.conns { - if c == cn { - p.conns = append(p.conns[:i], p.conns[i+1:]...) - if cn.pooled { - p.poolSize-- - p.checkMinIdleConns() + cid := cn.GetID() + delete(p.conns, cid) + atomic.AddUint32(&p.stats.StaleConns, 1) + + // Decrement pool size counter when removing a connection + if cn.pooled { + p.poolSize.Add(-1) + // this can be idle conn + for idx, ic := range p.idleConns { + if ic == cn { + p.idleConns = append(p.idleConns[:idx], p.idleConns[idx+1:]...) + p.idleConnsLen.Add(-1) + break } - break } } - atomic.AddUint32(&p.stats.StaleConns, 1) } func (p *ConnPool) closeConn(cn *Conn) error { @@ -434,16 +1256,28 @@ func (p *ConnPool) Len() int { // IdleLen returns number of idle connections. func (p *ConnPool) IdleLen() int { p.connsMu.Lock() - n := p.idleConnsLen + n := p.idleConnsLen.Load() p.connsMu.Unlock() - return n + return int(n) +} + +// Size returns the maximum pool size (capacity). 
+// +// This is used by the streaming credentials manager to size the re-auth worker pool, +// ensuring that re-auth operations don't exhaust the connection pool. +func (p *ConnPool) Size() int { + return int(p.cfg.PoolSize) } func (p *ConnPool) Stats() *Stats { return &Stats{ - Hits: atomic.LoadUint32(&p.stats.Hits), - Misses: atomic.LoadUint32(&p.stats.Misses), - Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + Hits: atomic.LoadUint32(&p.stats.Hits), + Misses: atomic.LoadUint32(&p.stats.Misses), + Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + WaitCount: atomic.LoadUint32(&p.stats.WaitCount), + Unusable: atomic.LoadUint32(&p.stats.Unusable), + WaitDurationNs: p.waitDurationNs.Load(), + PendingRequests: atomic.LoadUint32(&p.stats.PendingRequests), TotalConns: uint32(p.Len()), IdleConns: uint32(p.IdleLen()), @@ -483,28 +1317,62 @@ func (p *ConnPool) Close() error { } } p.conns = nil - p.poolSize = 0 + p.poolSize.Store(0) p.idleConns = nil - p.idleConnsLen = 0 + p.idleConnsLen.Store(0) p.connsMu.Unlock() return firstErr } -func (p *ConnPool) isHealthyConn(cn *Conn) bool { - now := time.Now() +func (p *ConnPool) isHealthyConn(cn *Conn, nowNs int64) bool { + // Performance optimization: check conditions from cheapest to most expensive, + // and from most likely to fail to least likely to fail. - if p.cfg.ConnMaxLifetime > 0 && now.Sub(cn.createdAt) >= p.cfg.ConnMaxLifetime { - return false + // Only fails if ConnMaxLifetime is set AND connection is old. + // Most pools don't set ConnMaxLifetime, so this rarely fails. + if p.cfg.ConnMaxLifetime > 0 { + if cn.expiresAt.UnixNano() < nowNs { + return false // Connection has exceeded max lifetime + } } - if p.cfg.ConnMaxIdleTime > 0 && now.Sub(cn.UsedAt()) >= p.cfg.ConnMaxIdleTime { + + // Most pools set ConnMaxIdleTime, and idle connections are common. + // Checking this first allows us to fail fast without expensive syscalls. 
+ if p.cfg.ConnMaxIdleTime > 0 { + if nowNs-cn.UsedAtNs() >= int64(p.cfg.ConnMaxIdleTime) { + return false // Connection has been idle too long + } + } + + // Only run this if the cheap checks passed. + if err := connCheck(cn.getNetConn()); err != nil { + // If there's unexpected data, it might be push notifications (RESP3) + if p.cfg.PushNotificationsEnabled && err == errUnexpectedRead { + // Peek at the reply type to check if it's a push notification + if replyType, err := cn.rd.PeekReplyType(); err == nil && replyType == proto.RespPush { + // For RESP3 connections with push notifications, we allow some buffered data + // The client will process these notifications before using the connection + internal.Logger.Printf( + context.Background(), + "push: conn[%d] has buffered data, likely push notifications - will be processed by client", + cn.GetID(), + ) + + // Update timestamp for healthy connection + cn.SetUsedAtNs(nowNs) + + // Connection is healthy, client will handle notifications + return true + } + // Not a push notification - treat as unhealthy + return false + } + // Connection failed health check return false } - if connCheck(cn.netConn) != nil { - return false - } - - cn.SetUsedAt(now) + // Only update UsedAt if connection is healthy (avoids unnecessary atomic store) + cn.SetUsedAtNs(nowNs) return true } diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go index 5a3fde19..365219a5 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go @@ -1,7 +1,13 @@ package pool -import "context" +import ( + "context" + "time" +) +// SingleConnPool is a pool that always returns the same connection. +// Note: This pool is not thread-safe. +// It is intended to be used by clients that need a single connection. 
type SingleConnPool struct { pool Pooler cn *Conn @@ -10,6 +16,12 @@ type SingleConnPool struct { var _ Pooler = (*SingleConnPool)(nil) +// NewSingleConnPool creates a new single connection pool. +// The pool will always return the same connection. +// The pool will not: +// - Close the connection +// - Reconnect the connection +// - Track the connection in any way func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool { return &SingleConnPool{ pool: pool, @@ -25,20 +37,47 @@ func (p *SingleConnPool) CloseConn(cn *Conn) error { return p.pool.CloseConn(cn) } -func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) { +func (p *SingleConnPool) Get(_ context.Context) (*Conn, error) { if p.stickyErr != nil { return nil, p.stickyErr } + if p.cn == nil { + return nil, ErrClosed + } + + // NOTE: SingleConnPool is NOT thread-safe by design and is used in special scenarios: + // - During initialization (connection is in INITIALIZING state) + // - During re-authentication (connection is in UNUSABLE state) + // - For transactions (connection might be in various states) + // We use SetUsed() which forces the transition, rather than TryTransition() which + // would fail if the connection is not in IDLE/CREATED state. + p.cn.SetUsed(true) + p.cn.SetUsedAt(time.Now()) return p.cn, nil } -func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {} +func (p *SingleConnPool) Put(_ context.Context, cn *Conn) { + if p.cn == nil { + return + } + if p.cn != cn { + return + } + p.cn.SetUsed(false) +} -func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) { +func (p *SingleConnPool) Remove(_ context.Context, cn *Conn, reason error) { + cn.SetUsed(false) p.cn = nil p.stickyErr = reason } +// RemoveWithoutTurn has the same behavior as Remove for SingleConnPool +// since SingleConnPool doesn't use a turn-based queue system. 
+func (p *SingleConnPool) RemoveWithoutTurn(ctx context.Context, cn *Conn, reason error) { + p.Remove(ctx, cn, reason) +} + func (p *SingleConnPool) Close() error { p.cn = nil p.stickyErr = ErrClosed @@ -53,6 +92,13 @@ func (p *SingleConnPool) IdleLen() int { return 0 } +// Size returns the maximum pool size, which is always 1 for SingleConnPool. +func (p *SingleConnPool) Size() int { return 1 } + func (p *SingleConnPool) Stats() *Stats { return &Stats{} } + +func (p *SingleConnPool) AddPoolHook(_ PoolHook) {} + +func (p *SingleConnPool) RemovePoolHook(_ PoolHook) {} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go index 3adb99bc..be869b56 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go @@ -123,6 +123,12 @@ func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) { p.ch <- cn } +// RemoveWithoutTurn has the same behavior as Remove for StickyConnPool +// since StickyConnPool doesn't use a turn-based queue system. +func (p *StickyConnPool) RemoveWithoutTurn(ctx context.Context, cn *Conn, reason error) { + p.Remove(ctx, cn, reason) +} + func (p *StickyConnPool) Close() error { if shared := atomic.AddInt32(&p.shared, -1); shared > 0 { return nil @@ -196,6 +202,13 @@ func (p *StickyConnPool) IdleLen() int { return len(p.ch) } +// Size returns the maximum pool size, which is always 1 for StickyConnPool. 
+func (p *StickyConnPool) Size() int { return 1 } + func (p *StickyConnPool) Stats() *Stats { return &Stats{} } + +func (p *StickyConnPool) AddPoolHook(hook PoolHook) {} + +func (p *StickyConnPool) RemovePoolHook(hook PoolHook) {} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go new file mode 100644 index 00000000..e566d42b --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pubsub.go @@ -0,0 +1,81 @@ +package pool + +import ( + "context" + "net" + "sync" + "sync/atomic" +) + +type PubSubStats struct { + Created uint32 + Untracked uint32 + Active uint32 +} + +// PubSubPool manages a pool of PubSub connections. +type PubSubPool struct { + opt *Options + netDialer func(ctx context.Context, network, addr string) (net.Conn, error) + + // Map to track active PubSub connections + activeConns sync.Map // map[uint64]*Conn (connID -> conn) + closed atomic.Bool + stats PubSubStats +} + +// NewPubSubPool implements a pool for PubSub connections. 
+// It intentionally does not implement the Pooler interface +func NewPubSubPool(opt *Options, netDialer func(ctx context.Context, network, addr string) (net.Conn, error)) *PubSubPool { + return &PubSubPool{ + opt: opt, + netDialer: netDialer, + } +} + +func (p *PubSubPool) NewConn(ctx context.Context, network string, addr string, channels []string) (*Conn, error) { + if p.closed.Load() { + return nil, ErrClosed + } + + netConn, err := p.netDialer(ctx, network, addr) + if err != nil { + return nil, err + } + cn := NewConnWithBufferSize(netConn, p.opt.ReadBufferSize, p.opt.WriteBufferSize) + cn.pubsub = true + // Set pool name for metrics + cn.SetPoolName(p.opt.Name) + atomic.AddUint32(&p.stats.Created, 1) + return cn, nil +} + +func (p *PubSubPool) TrackConn(cn *Conn) { + atomic.AddUint32(&p.stats.Active, 1) + p.activeConns.Store(cn.GetID(), cn) +} + +func (p *PubSubPool) UntrackConn(cn *Conn) { + atomic.AddUint32(&p.stats.Active, ^uint32(0)) + atomic.AddUint32(&p.stats.Untracked, 1) + p.activeConns.Delete(cn.GetID()) +} + +func (p *PubSubPool) Close() error { + p.closed.Store(true) + p.activeConns.Range(func(key, value interface{}) bool { + cn := value.(*Conn) + _ = cn.Close() + return true + }) + return nil +} + +func (p *PubSubPool) Stats() *PubSubStats { + // load stats atomically + return &PubSubStats{ + Created: atomic.LoadUint32(&p.stats.Created), + Untracked: atomic.LoadUint32(&p.stats.Untracked), + Active: atomic.LoadUint32(&p.stats.Active), + } +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go new file mode 100644 index 00000000..78f86813 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/want_conn.go @@ -0,0 +1,115 @@ +package pool + +import ( + "context" + "sync" +) + +type wantConn struct { + mu sync.RWMutex // protects ctx, done and sending of the result + ctx context.Context // context for dial, cleared after delivered or canceled + 
cancelCtx context.CancelFunc + done bool // true after delivered or canceled + result chan wantConnResult // channel to deliver connection or error +} + +// getCtxForDial returns context for dial or nil if connection was delivered or canceled. +func (w *wantConn) getCtxForDial() context.Context { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.ctx +} + +func (w *wantConn) tryDeliver(cn *Conn, err error) bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.done { + return false + } + + w.done = true + w.ctx = nil + + w.result <- wantConnResult{cn: cn, err: err} + close(w.result) + + return true +} + +func (w *wantConn) cancel() *Conn { + w.mu.Lock() + var cn *Conn + if w.done { + select { + case result := <-w.result: + cn = result.cn + default: + } + } else { + close(w.result) + } + + w.done = true + w.ctx = nil + w.mu.Unlock() + + return cn +} + +func (w *wantConn) isOngoing() bool { + w.mu.RLock() + defer w.mu.RUnlock() + return !w.done +} + +type wantConnResult struct { + cn *Conn + err error +} + +type wantConnQueue struct { + mu sync.RWMutex + items []*wantConn +} + +func newWantConnQueue() *wantConnQueue { + return &wantConnQueue{ + items: make([]*wantConn, 0), + } +} + +func (q *wantConnQueue) enqueue(w *wantConn) { + q.mu.Lock() + defer q.mu.Unlock() + q.items = append(q.items, w) +} + +func (q *wantConnQueue) dequeue() (*wantConn, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + if len(q.items) == 0 { + return nil, false + } + + item := q.items[0] + q.items = q.items[1:] + return item, true +} + +func (q *wantConnQueue) discardDoneAtFront() int { + q.mu.Lock() + defer q.mu.Unlock() + count := 0 + for len(q.items) > 0 { + if q.items[0].isOngoing() { + break + } + + q.items = q.items[1:] + count++ + } + + return count +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go b/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go index 8d23817f..bac68f79 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go +++ 
b/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go @@ -12,6 +12,9 @@ import ( "github.com/redis/go-redis/v9/internal/util" ) +// DefaultBufferSize is the default size for read/write buffers (32 KiB). +const DefaultBufferSize = 32 * 1024 + // redis resp protocol data type. const ( RespStatus = '+' // +\r\n @@ -47,7 +50,8 @@ func (e RedisError) Error() string { return string(e) } func (RedisError) RedisError() {} func ParseErrorReply(line []byte) error { - return RedisError(line[1:]) + msg := string(line[1:]) + return parseTypedRedisError(msg) } //------------------------------------------------------------------------------ @@ -58,7 +62,13 @@ type Reader struct { func NewReader(rd io.Reader) *Reader { return &Reader{ - rd: bufio.NewReader(rd), + rd: bufio.NewReaderSize(rd, DefaultBufferSize), + } +} + +func NewReaderSize(rd io.Reader, size int) *Reader { + return &Reader{ + rd: bufio.NewReaderSize(rd, size), } } @@ -90,6 +100,92 @@ func (r *Reader) PeekReplyType() (byte, error) { return b[0], nil } +func (r *Reader) PeekPushNotificationName() (string, error) { + // "prime" the buffer by peeking at the next byte + c, err := r.Peek(1) + if err != nil { + return "", err + } + if c[0] != RespPush { + return "", fmt.Errorf("redis: can't peek push notification name, next reply is not a push notification") + } + + // peek 36 bytes at most, should be enough to read the push notification name + toPeek := 36 + buffered := r.Buffered() + if buffered == 0 { + return "", fmt.Errorf("redis: can't peek push notification name, no data available") + } + if buffered < toPeek { + toPeek = buffered + } + buf, err := r.rd.Peek(toPeek) + if err != nil { + return "", err + } + if buf[0] != RespPush { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + + if len(buf) < 3 { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + + // remove push notification type + buf = buf[1:] + // remove first line - e.g. 
>2\r\n + for i := 0; i < len(buf)-1; i++ { + if buf[i] == '\r' && buf[i+1] == '\n' { + buf = buf[i+2:] + break + } else { + if buf[i] < '0' || buf[i] > '9' { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + } + } + if len(buf) < 2 { + return "", fmt.Errorf("redis: can't parse push notification: %q", buf) + } + // next line should be $\r\n or +\r\n + // should have the type of the push notification name and it's length + if buf[0] != RespString && buf[0] != RespStatus { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + typeOfName := buf[0] + // remove the type of the push notification name + buf = buf[1:] + if typeOfName == RespString { + // remove the length of the string + if len(buf) < 2 { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + for i := 0; i < len(buf)-1; i++ { + if buf[i] == '\r' && buf[i+1] == '\n' { + buf = buf[i+2:] + break + } else { + if buf[i] < '0' || buf[i] > '9' { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + } + } + } + + if len(buf) < 2 { + return "", fmt.Errorf("redis: can't parse push notification name: %q", buf) + } + // keep only the notification name + for i := 0; i < len(buf)-1; i++ { + if buf[i] == '\r' && buf[i+1] == '\n' { + buf = buf[:i] + break + } + } + + return util.BytesToString(buf), nil +} + // ReadLine Return a valid reply, it will check the protocol or redis error, // and discard the attribute type. 
func (r *Reader) ReadLine() ([]byte, error) { @@ -106,7 +202,7 @@ func (r *Reader) ReadLine() ([]byte, error) { var blobErr string blobErr, err = r.readStringReply(line) if err == nil { - err = RedisError(blobErr) + err = parseTypedRedisError(blobErr) } return nil, err case RespAttr: diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go b/vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go new file mode 100644 index 00000000..a28240f5 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/proto/redis_errors.go @@ -0,0 +1,527 @@ +package proto + +import ( + "errors" + "strings" +) + +// Typed Redis errors for better error handling with wrapping support. +// These errors maintain backward compatibility by keeping the same error messages. + +// LoadingError is returned when Redis is loading the dataset in memory. +type LoadingError struct { + msg string +} + +func (e *LoadingError) Error() string { + return e.msg +} + +func (e *LoadingError) RedisError() {} + +// NewLoadingError creates a new LoadingError with the given message. +func NewLoadingError(msg string) *LoadingError { + return &LoadingError{msg: msg} +} + +// ReadOnlyError is returned when trying to write to a read-only replica. +type ReadOnlyError struct { + msg string +} + +func (e *ReadOnlyError) Error() string { + return e.msg +} + +func (e *ReadOnlyError) RedisError() {} + +// NewReadOnlyError creates a new ReadOnlyError with the given message. +func NewReadOnlyError(msg string) *ReadOnlyError { + return &ReadOnlyError{msg: msg} +} + +// MovedError is returned when a key has been moved to a different node in a cluster. +type MovedError struct { + msg string + addr string +} + +func (e *MovedError) Error() string { + return e.msg +} + +func (e *MovedError) RedisError() {} + +// Addr returns the address of the node where the key has been moved. 
+func (e *MovedError) Addr() string { + return e.addr +} + +// NewMovedError creates a new MovedError with the given message and address. +func NewMovedError(msg string, addr string) *MovedError { + return &MovedError{msg: msg, addr: addr} +} + +// AskError is returned when a key is being migrated and the client should ask another node. +type AskError struct { + msg string + addr string +} + +func (e *AskError) Error() string { + return e.msg +} + +func (e *AskError) RedisError() {} + +// Addr returns the address of the node to ask. +func (e *AskError) Addr() string { + return e.addr +} + +// NewAskError creates a new AskError with the given message and address. +func NewAskError(msg string, addr string) *AskError { + return &AskError{msg: msg, addr: addr} +} + +// ClusterDownError is returned when the cluster is down. +type ClusterDownError struct { + msg string +} + +func (e *ClusterDownError) Error() string { + return e.msg +} + +func (e *ClusterDownError) RedisError() {} + +// NewClusterDownError creates a new ClusterDownError with the given message. +func NewClusterDownError(msg string) *ClusterDownError { + return &ClusterDownError{msg: msg} +} + +// TryAgainError is returned when a command cannot be processed and should be retried. +type TryAgainError struct { + msg string +} + +func (e *TryAgainError) Error() string { + return e.msg +} + +func (e *TryAgainError) RedisError() {} + +// NewTryAgainError creates a new TryAgainError with the given message. +func NewTryAgainError(msg string) *TryAgainError { + return &TryAgainError{msg: msg} +} + +// MasterDownError is returned when the master is down. +type MasterDownError struct { + msg string +} + +func (e *MasterDownError) Error() string { + return e.msg +} + +func (e *MasterDownError) RedisError() {} + +// NewMasterDownError creates a new MasterDownError with the given message. 
+func NewMasterDownError(msg string) *MasterDownError { + return &MasterDownError{msg: msg} +} + +// MaxClientsError is returned when the maximum number of clients has been reached. +type MaxClientsError struct { + msg string +} + +func (e *MaxClientsError) Error() string { + return e.msg +} + +func (e *MaxClientsError) RedisError() {} + +// NewMaxClientsError creates a new MaxClientsError with the given message. +func NewMaxClientsError(msg string) *MaxClientsError { + return &MaxClientsError{msg: msg} +} + +// AuthError is returned when authentication fails. +type AuthError struct { + msg string +} + +func (e *AuthError) Error() string { + return e.msg +} + +func (e *AuthError) RedisError() {} + +// NewAuthError creates a new AuthError with the given message. +func NewAuthError(msg string) *AuthError { + return &AuthError{msg: msg} +} + +// PermissionError is returned when a user lacks required permissions. +type PermissionError struct { + msg string +} + +func (e *PermissionError) Error() string { + return e.msg +} + +func (e *PermissionError) RedisError() {} + +// NewPermissionError creates a new PermissionError with the given message. +func NewPermissionError(msg string) *PermissionError { + return &PermissionError{msg: msg} +} + +// ExecAbortError is returned when a transaction is aborted. +type ExecAbortError struct { + msg string +} + +func (e *ExecAbortError) Error() string { + return e.msg +} + +func (e *ExecAbortError) RedisError() {} + +// NewExecAbortError creates a new ExecAbortError with the given message. +func NewExecAbortError(msg string) *ExecAbortError { + return &ExecAbortError{msg: msg} +} + +// OOMError is returned when Redis is out of memory. +type OOMError struct { + msg string +} + +func (e *OOMError) Error() string { + return e.msg +} + +func (e *OOMError) RedisError() {} + +// NewOOMError creates a new OOMError with the given message. 
+func NewOOMError(msg string) *OOMError { + return &OOMError{msg: msg} +} + +// NoReplicasError is returned when not enough replicas acknowledge a write. +// This error occurs when using WAIT/WAITAOF commands or CLUSTER SETSLOT with +// synchronous replication, and the required number of replicas cannot confirm +// the write within the timeout period. +type NoReplicasError struct { + msg string +} + +func (e *NoReplicasError) Error() string { + return e.msg +} + +func (e *NoReplicasError) RedisError() {} + +// NewNoReplicasError creates a new NoReplicasError with the given message. +func NewNoReplicasError(msg string) *NoReplicasError { + return &NoReplicasError{msg: msg} +} + +// parseTypedRedisError parses a Redis error message and returns a typed error if applicable. +// This function maintains backward compatibility by keeping the same error messages. +func parseTypedRedisError(msg string) error { + // Check for specific error patterns and return typed errors + switch { + case strings.HasPrefix(msg, "LOADING "): + return NewLoadingError(msg) + case strings.HasPrefix(msg, "READONLY "): + return NewReadOnlyError(msg) + case strings.HasPrefix(msg, "MOVED "): + // Extract address from "MOVED " + addr := extractAddr(msg) + return NewMovedError(msg, addr) + case strings.HasPrefix(msg, "ASK "): + // Extract address from "ASK " + addr := extractAddr(msg) + return NewAskError(msg, addr) + case strings.HasPrefix(msg, "CLUSTERDOWN "): + return NewClusterDownError(msg) + case strings.HasPrefix(msg, "TRYAGAIN "): + return NewTryAgainError(msg) + case strings.HasPrefix(msg, "MASTERDOWN "): + return NewMasterDownError(msg) + case strings.HasPrefix(msg, "NOREPLICAS "): + return NewNoReplicasError(msg) + case msg == "ERR max number of clients reached": + return NewMaxClientsError(msg) + case strings.HasPrefix(msg, "NOAUTH "), strings.HasPrefix(msg, "WRONGPASS "), strings.Contains(msg, "unauthenticated"): + return NewAuthError(msg) + case strings.HasPrefix(msg, "NOPERM "): + 
return NewPermissionError(msg) + case strings.HasPrefix(msg, "EXECABORT "): + return NewExecAbortError(msg) + case strings.HasPrefix(msg, "OOM "): + return NewOOMError(msg) + default: + // Return generic RedisError for unknown error types + return RedisError(msg) + } +} + +// extractAddr extracts the address from MOVED/ASK error messages. +// Format: "MOVED " or "ASK " +func extractAddr(msg string) string { + ind := strings.LastIndex(msg, " ") + if ind == -1 { + return "" + } + return msg[ind+1:] +} + +// IsLoadingError checks if an error is a LoadingError, even if wrapped. +func IsLoadingError(err error) bool { + if err == nil { + return false + } + var loadingErr *LoadingError + if errors.As(err, &loadingErr) { + return true + } + // Check if wrapped error is a RedisError with LOADING prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "LOADING ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "LOADING ") +} + +// IsReadOnlyError checks if an error is a ReadOnlyError, even if wrapped. +func IsReadOnlyError(err error) bool { + if err == nil { + return false + } + var readOnlyErr *ReadOnlyError + if errors.As(err, &readOnlyErr) { + return true + } + // Check if wrapped error is a RedisError with READONLY prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "READONLY ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "READONLY ") +} + +// IsMovedError checks if an error is a MovedError, even if wrapped. +// Returns the error and a boolean indicating if it's a MovedError. 
+func IsMovedError(err error) (*MovedError, bool) { + if err == nil { + return nil, false + } + var movedErr *MovedError + if errors.As(err, &movedErr) { + return movedErr, true + } + // Fallback to string checking for backward compatibility + s := err.Error() + if strings.HasPrefix(s, "MOVED ") { + // Parse: MOVED 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + return &MovedError{msg: s, addr: parts[2]}, true + } + } + return nil, false +} + +// IsAskError checks if an error is an AskError, even if wrapped. +// Returns the error and a boolean indicating if it's an AskError. +func IsAskError(err error) (*AskError, bool) { + if err == nil { + return nil, false + } + var askErr *AskError + if errors.As(err, &askErr) { + return askErr, true + } + // Fallback to string checking for backward compatibility + s := err.Error() + if strings.HasPrefix(s, "ASK ") { + // Parse: ASK 3999 127.0.0.1:6381 + parts := strings.Split(s, " ") + if len(parts) == 3 { + return &AskError{msg: s, addr: parts[2]}, true + } + } + return nil, false +} + +// IsClusterDownError checks if an error is a ClusterDownError, even if wrapped. +func IsClusterDownError(err error) bool { + if err == nil { + return false + } + var clusterDownErr *ClusterDownError + if errors.As(err, &clusterDownErr) { + return true + } + // Check if wrapped error is a RedisError with CLUSTERDOWN prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "CLUSTERDOWN ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "CLUSTERDOWN ") +} + +// IsTryAgainError checks if an error is a TryAgainError, even if wrapped. 
+func IsTryAgainError(err error) bool { + if err == nil { + return false + } + var tryAgainErr *TryAgainError + if errors.As(err, &tryAgainErr) { + return true + } + // Check if wrapped error is a RedisError with TRYAGAIN prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "TRYAGAIN ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "TRYAGAIN ") +} + +// IsMasterDownError checks if an error is a MasterDownError, even if wrapped. +func IsMasterDownError(err error) bool { + if err == nil { + return false + } + var masterDownErr *MasterDownError + if errors.As(err, &masterDownErr) { + return true + } + // Check if wrapped error is a RedisError with MASTERDOWN prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "MASTERDOWN ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "MASTERDOWN ") +} + +// IsMaxClientsError checks if an error is a MaxClientsError, even if wrapped. +func IsMaxClientsError(err error) bool { + if err == nil { + return false + } + var maxClientsErr *MaxClientsError + if errors.As(err, &maxClientsErr) { + return true + } + // Check if wrapped error is a RedisError with max clients prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "ERR max number of clients reached") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "ERR max number of clients reached") +} + +// IsAuthError checks if an error is an AuthError, even if wrapped. 
+func IsAuthError(err error) bool { + if err == nil { + return false + } + var authErr *AuthError + if errors.As(err, &authErr) { + return true + } + // Check if wrapped error is a RedisError with auth error prefix + var redisErr RedisError + if errors.As(err, &redisErr) { + s := redisErr.Error() + return strings.HasPrefix(s, "NOAUTH ") || strings.HasPrefix(s, "WRONGPASS ") || strings.Contains(s, "unauthenticated") + } + // Fallback to string checking for backward compatibility + s := err.Error() + return strings.HasPrefix(s, "NOAUTH ") || strings.HasPrefix(s, "WRONGPASS ") || strings.Contains(s, "unauthenticated") +} + +// IsPermissionError checks if an error is a PermissionError, even if wrapped. +func IsPermissionError(err error) bool { + if err == nil { + return false + } + var permErr *PermissionError + if errors.As(err, &permErr) { + return true + } + // Check if wrapped error is a RedisError with NOPERM prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "NOPERM ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "NOPERM ") +} + +// IsExecAbortError checks if an error is an ExecAbortError, even if wrapped. +func IsExecAbortError(err error) bool { + if err == nil { + return false + } + var execAbortErr *ExecAbortError + if errors.As(err, &execAbortErr) { + return true + } + // Check if wrapped error is a RedisError with EXECABORT prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "EXECABORT ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "EXECABORT ") +} + +// IsOOMError checks if an error is an OOMError, even if wrapped. 
+func IsOOMError(err error) bool { + if err == nil { + return false + } + var oomErr *OOMError + if errors.As(err, &oomErr) { + return true + } + // Check if wrapped error is a RedisError with OOM prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "OOM ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "OOM ") +} + +// IsNoReplicasError checks if an error is a NoReplicasError, even if wrapped. +func IsNoReplicasError(err error) bool { + if err == nil { + return false + } + var noReplicasErr *NoReplicasError + if errors.As(err, &noReplicasErr) { + return true + } + // Check if wrapped error is a RedisError with NOREPLICAS prefix + var redisErr RedisError + if errors.As(err, &redisErr) && strings.HasPrefix(redisErr.Error(), "NOREPLICAS ") { + return true + } + // Fallback to string checking for backward compatibility + return strings.HasPrefix(err.Error(), "NOREPLICAS ") +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go index 78595cc4..38e66c68 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go +++ b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go @@ -66,56 +66,95 @@ func (w *Writer) WriteArg(v interface{}) error { case string: return w.string(v) case *string: + if v == nil { + return w.string("") + } return w.string(*v) case []byte: return w.bytes(v) case int: return w.int(int64(v)) case *int: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int8: return w.int(int64(v)) case *int8: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int16: return w.int(int64(v)) case *int16: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int32: return w.int(int64(v)) case *int32: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int64: return w.int(v) case *int64: + if 
v == nil { + return w.int(0) + } return w.int(*v) case uint: return w.uint(uint64(v)) case *uint: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint8: return w.uint(uint64(v)) case *uint8: + if v == nil { + return w.string("") + } return w.uint(uint64(*v)) case uint16: return w.uint(uint64(v)) case *uint16: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint32: return w.uint(uint64(v)) case *uint32: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint64: return w.uint(v) case *uint64: + if v == nil { + return w.uint(0) + } return w.uint(*v) case float32: return w.float(float64(v)) case *float32: + if v == nil { + return w.float(0) + } return w.float(float64(*v)) case float64: return w.float(v) case *float64: + if v == nil { + return w.float(0) + } return w.float(*v) case bool: if v { @@ -123,6 +162,9 @@ func (w *Writer) WriteArg(v interface{}) error { } return w.int(0) case *bool: + if v == nil { + return w.int(0) + } if *v { return w.int(1) } @@ -130,8 +172,19 @@ func (w *Writer) WriteArg(v interface{}) error { case time.Time: w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) return w.bytes(w.numBuf) + case *time.Time: + if v == nil { + v = &time.Time{} + } + w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) + return w.bytes(w.numBuf) case time.Duration: return w.int(v.Nanoseconds()) + case *time.Duration: + if v == nil { + return w.int(0) + } + return w.int(v.Nanoseconds()) case encoding.BinaryMarshaler: b, err := v.MarshalBinary() if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/internal/redis.go b/vendor/github.com/redis/go-redis/v9/internal/redis.go new file mode 100644 index 00000000..190bbebe --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/redis.go @@ -0,0 +1,3 @@ +package internal + +const RedisNull = "" diff --git a/vendor/github.com/redis/go-redis/v9/internal/routing/aggregator.go 
b/vendor/github.com/redis/go-redis/v9/internal/routing/aggregator.go new file mode 100644 index 00000000..0d6321ec --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/routing/aggregator.go @@ -0,0 +1,1000 @@ +package routing + +import ( + "errors" + "fmt" + "math" + "sync" + + "sync/atomic" + + "github.com/redis/go-redis/v9/internal/util" + uberAtomic "go.uber.org/atomic" +) + +var ( + ErrMaxAggregation = errors.New("redis: no valid results to aggregate for max operation") + ErrMinAggregation = errors.New("redis: no valid results to aggregate for min operation") + ErrAndAggregation = errors.New("redis: no valid results to aggregate for logical AND operation") + ErrOrAggregation = errors.New("redis: no valid results to aggregate for logical OR operation") +) + +// ResponseAggregator defines the interface for aggregating responses from multiple shards. +type ResponseAggregator interface { + // Add processes a single shard response. + Add(result interface{}, err error) error + + // AddWithKey processes a single shard response for a specific key (used by keyed aggregators). + AddWithKey(key string, result interface{}, err error) error + + BatchAdd(map[string]AggregatorResErr) error + + BatchSlice([]AggregatorResErr) error + + // Result returns the final aggregated result and any error. + Result() (interface{}, error) +} + +type AggregatorResErr struct { + Result interface{} + Err error +} + +// NewResponseAggregator creates an aggregator based on the response policy. 
+func NewResponseAggregator(policy ResponsePolicy, cmdName string) ResponseAggregator { + switch policy { + case RespDefaultKeyless: + return &DefaultKeylessAggregator{results: make([]interface{}, 0)} + case RespDefaultHashSlot: + return &DefaultKeyedAggregator{results: make(map[string]interface{})} + case RespAllSucceeded: + return &AllSucceededAggregator{} + case RespOneSucceeded: + return &OneSucceededAggregator{} + case RespAggSum: + return &AggSumAggregator{ + // res: + } + case RespAggMin: + return &AggMinAggregator{ + res: util.NewAtomicMin(), + } + case RespAggMax: + return &AggMaxAggregator{ + res: util.NewAtomicMax(), + } + case RespAggLogicalAnd: + andAgg := &AggLogicalAndAggregator{} + andAgg.res.Store(true) + + return andAgg + case RespAggLogicalOr: + return &AggLogicalOrAggregator{} + case RespSpecial: + return NewSpecialAggregator(cmdName) + default: + return &AllSucceededAggregator{} + } +} + +func NewDefaultAggregator(isKeyed bool) ResponseAggregator { + if isKeyed { + return &DefaultKeyedAggregator{ + results: make(map[string]interface{}), + } + } + return &DefaultKeylessAggregator{} +} + +// AllSucceededAggregator returns one non-error reply if every shard succeeded, +// propagates the first error otherwise. 
+type AllSucceededAggregator struct { + err atomic.Value + res atomic.Value +} + +func (a *AllSucceededAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + if result != nil { + a.res.CompareAndSwap(nil, result) + } + + return nil +} + +func (a *AllSucceededAggregator) BatchAdd(results map[string]AggregatorResErr) error { + for _, res := range results { + err := a.Add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *AllSucceededAggregator) BatchSlice(results []AggregatorResErr) error { + for _, res := range results { + err := a.Add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *AllSucceededAggregator) Result() (interface{}, error) { + var err error + res, e := a.res.Load(), a.err.Load() + if e != nil { + err = e.(error) + } + + return res, err +} + +func (a *AllSucceededAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +// OneSucceededAggregator returns the first non-error reply, +// if all shards errored, returns any one of those errors. 
+type OneSucceededAggregator struct { + err atomic.Value + res atomic.Value +} + +func (a *OneSucceededAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + if result != nil { + a.res.CompareAndSwap(nil, result) + } + + return nil +} + +func (a *OneSucceededAggregator) BatchAdd(results map[string]AggregatorResErr) error { + for _, res := range results { + err := a.Add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err == nil { + return nil + } + } + + return nil +} + +func (a *OneSucceededAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *OneSucceededAggregator) BatchSlice(results []AggregatorResErr) error { + for _, res := range results { + err := a.Add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err == nil { + return nil + } + } + + return nil +} + +func (a *OneSucceededAggregator) Result() (interface{}, error) { + res, e := a.res.Load(), a.err.Load() + if res == nil { + return nil, e.(error) + } + + return res, nil +} + +// AggSumAggregator sums numeric replies from all shards. 
+type AggSumAggregator struct { + err atomic.Value + res uberAtomic.Float64 +} + +func (a *AggSumAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + } + + if result != nil { + val, err := toFloat64(result) + if err != nil { + a.err.CompareAndSwap(nil, err) + return err + } + a.res.Add(val) + } + + return nil +} + +func (a *AggSumAggregator) BatchAdd(results map[string]AggregatorResErr) error { + var sum int64 + + for _, res := range results { + if res.Err != nil { + return a.Add(res.Result, res.Err) + } + + intRes, err := toInt64(res.Result) + if err != nil { + return a.Add(nil, err) + } + + sum += intRes + } + + return a.Add(sum, nil) +} + +func (a *AggSumAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *AggSumAggregator) BatchSlice(results []AggregatorResErr) error { + var sum int64 + + for _, res := range results { + if res.Err != nil { + return a.Add(res.Result, res.Err) + } + + intRes, err := toInt64(res.Result) + if err != nil { + return a.Add(nil, err) + } + + sum += intRes + } + + return a.Add(sum, nil) +} + +func (a *AggSumAggregator) Result() (interface{}, error) { + res, err := a.res.Load(), a.err.Load() + if err != nil { + return nil, err.(error) + } + + return res, nil +} + +// AggMinAggregator returns the minimum numeric value from all shards. 
+type AggMinAggregator struct { + err atomic.Value + res *util.AtomicMin +} + +func (a *AggMinAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + floatVal, e := toFloat64(result) + if e != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + a.res.Value(floatVal) + + return nil +} + +func (a *AggMinAggregator) BatchAdd(results map[string]AggregatorResErr) error { + min := int64(math.MaxInt64) + + for _, res := range results { + if res.Err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + resInt, err := toInt64(res.Result) + if err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + if resInt < min { + min = resInt + } + + } + + return a.Add(min, nil) +} + +func (a *AggMinAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *AggMinAggregator) BatchSlice(results []AggregatorResErr) error { + min := float64(math.MaxFloat64) + + for _, res := range results { + if res.Err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + floatVal, err := toFloat64(res.Result) + if err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + if floatVal < min { + min = floatVal + } + + } + + return a.Add(min, nil) +} + +func (a *AggMinAggregator) Result() (interface{}, error) { + err := a.err.Load() + if err != nil { + return nil, err.(error) + } + + val, hasVal := a.res.Min() + if !hasVal { + return nil, ErrMinAggregation + } + return val, nil +} + +// AggMaxAggregator returns the maximum numeric value from all shards. 
+type AggMaxAggregator struct { + err atomic.Value + res *util.AtomicMax +} + +func (a *AggMaxAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + floatVal, e := toFloat64(result) + if e != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + a.res.Value(floatVal) + + return nil +} + +func (a *AggMaxAggregator) BatchAdd(results map[string]AggregatorResErr) error { + max := int64(math.MinInt64) + + for _, res := range results { + if res.Err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + resInt, err := toInt64(res.Result) + if err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + if resInt > max { + max = resInt + } + + } + + return a.Add(max, nil) +} + +func (a *AggMaxAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *AggMaxAggregator) BatchSlice(results []AggregatorResErr) error { + max := int64(math.MinInt64) + + for _, res := range results { + if res.Err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + resInt, err := toInt64(res.Result) + if err != nil { + _ = a.Add(nil, res.Err) + return nil + } + + if resInt > max { + max = resInt + } + + } + + return a.Add(max, nil) +} + +func (a *AggMaxAggregator) Result() (interface{}, error) { + err := a.err.Load() + if err != nil { + return nil, err.(error) + } + + val, hasVal := a.res.Max() + if !hasVal { + return nil, ErrMaxAggregation + } + return val, nil +} + +// AggLogicalAndAggregator performs logical AND on boolean values. 
+type AggLogicalAndAggregator struct { + err atomic.Value + res atomic.Bool + hasResult atomic.Bool +} + +func (a *AggLogicalAndAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + val, e := toBool(result) + if e != nil { + a.err.CompareAndSwap(nil, e) + return e + } + + // Atomic AND operation: if val is false, result is always false + if !val { + a.res.Store(false) + } + + a.hasResult.Store(true) + + return nil +} + +func (a *AggLogicalAndAggregator) BatchAdd(results map[string]AggregatorResErr) error { + result := true + + for _, res := range results { + if res.Err != nil { + return a.Add(nil, res.Err) + } + + boolRes, err := toBool(res.Result) + if err != nil { + return a.Add(nil, err) + } + + result = result && boolRes + } + + return a.Add(result, nil) +} + +func (a *AggLogicalAndAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *AggLogicalAndAggregator) BatchSlice(results []AggregatorResErr) error { + result := true + + for _, res := range results { + if res.Err != nil { + return a.Add(nil, res.Err) + } + + boolRes, err := toBool(res.Result) + if err != nil { + return a.Add(nil, err) + } + + result = result && boolRes + } + + return a.Add(result, nil) +} + +func (a *AggLogicalAndAggregator) Result() (interface{}, error) { + err := a.err.Load() + if err != nil { + return nil, err.(error) + } + + if !a.hasResult.Load() { + return nil, ErrAndAggregation + } + return a.res.Load(), nil +} + +// AggLogicalOrAggregator performs logical OR on boolean values. 
+type AggLogicalOrAggregator struct { + err atomic.Value + res atomic.Bool + hasResult atomic.Bool +} + +func (a *AggLogicalOrAggregator) Add(result interface{}, err error) error { + if err != nil { + a.err.CompareAndSwap(nil, err) + return nil + } + + val, e := toBool(result) + if e != nil { + a.err.CompareAndSwap(nil, e) + return e + } + + // Atomic OR operation: if val is true, result is always true + if val { + a.res.Store(true) + } + + a.hasResult.Store(true) + + return nil +} + +func (a *AggLogicalOrAggregator) BatchAdd(results map[string]AggregatorResErr) error { + result := false + + for _, res := range results { + if res.Err != nil { + return a.Add(nil, res.Err) + } + + boolRes, err := toBool(res.Result) + if err != nil { + return a.Add(nil, err) + } + + result = result || boolRes + } + + return a.Add(result, nil) +} + +func (a *AggLogicalOrAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *AggLogicalOrAggregator) BatchSlice(results []AggregatorResErr) error { + result := false + + for _, res := range results { + if res.Err != nil { + return a.Add(nil, res.Err) + } + + boolRes, err := toBool(res.Result) + if err != nil { + return a.Add(nil, err) + } + + result = result || boolRes + } + + return a.Add(result, nil) +} + +func (a *AggLogicalOrAggregator) Result() (interface{}, error) { + err := a.err.Load() + if err != nil { + return nil, err.(error) + } + + if !a.hasResult.Load() { + return nil, ErrOrAggregation + } + return a.res.Load(), nil +} + +func toInt64(val interface{}) (int64, error) { + if val == nil { + return 0, nil + } + switch v := val.(type) { + case int64: + return v, nil + case int: + return int64(v), nil + case int32: + return int64(v), nil + case float64: + if v != math.Trunc(v) { + return 0, fmt.Errorf("cannot convert float %f to int64", v) + } + return int64(v), nil + default: + return 0, fmt.Errorf("cannot convert %T to int64", val) + } +} + +func toFloat64(val 
interface{}) (float64, error) { + if val == nil { + return 0, nil + } + + switch v := val.(type) { + case float64: + return v, nil + case int: + return float64(v), nil + case int32: + return float64(v), nil + case int64: + return float64(v), nil + case float32: + return float64(v), nil + default: + return 0, fmt.Errorf("cannot convert %T to float64", val) + } +} + +func toBool(val interface{}) (bool, error) { + if val == nil { + return false, nil + } + switch v := val.(type) { + case bool: + return v, nil + case int64: + return v != 0, nil + case int: + return v != 0, nil + default: + return false, fmt.Errorf("cannot convert %T to bool", val) + } +} + +// DefaultKeylessAggregator collects all results in an array, order doesn't matter. +type DefaultKeylessAggregator struct { + mu sync.Mutex + results []interface{} + firstErr error +} + +func (a *DefaultKeylessAggregator) add(result interface{}, err error) error { + if err != nil && a.firstErr == nil { + a.firstErr = err + return nil + } + if err == nil { + a.results = append(a.results, result) + } + return nil +} + +func (a *DefaultKeylessAggregator) Add(result interface{}, err error) error { + a.mu.Lock() + defer a.mu.Unlock() + + return a.add(result, err) +} + +func (a *DefaultKeylessAggregator) BatchAdd(results map[string]AggregatorResErr) error { + a.mu.Lock() + defer a.mu.Unlock() + + for _, res := range results { + err := a.add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *DefaultKeylessAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *DefaultKeylessAggregator) BatchSlice(results []AggregatorResErr) error { + a.mu.Lock() + defer a.mu.Unlock() + + for _, res := range results { + err := a.add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *DefaultKeylessAggregator) Result() 
(interface{}, error) { + a.mu.Lock() + defer a.mu.Unlock() + + if a.firstErr != nil { + return nil, a.firstErr + } + return a.results, nil +} + +// DefaultKeyedAggregator reassembles replies in the exact key order of the original request. +type DefaultKeyedAggregator struct { + mu sync.Mutex + results map[string]interface{} + keyOrder []string + firstErr error +} + +func NewDefaultKeyedAggregator(keyOrder []string) *DefaultKeyedAggregator { + return &DefaultKeyedAggregator{ + results: make(map[string]interface{}), + keyOrder: keyOrder, + } +} + +func (a *DefaultKeyedAggregator) add(result interface{}, err error) error { + if err != nil && a.firstErr == nil { + a.firstErr = err + return nil + } + // For non-keyed Add, just collect the result without ordering + if err == nil { + a.results["__default__"] = result + } + return nil +} + +func (a *DefaultKeyedAggregator) Add(result interface{}, err error) error { + a.mu.Lock() + defer a.mu.Unlock() + + return a.add(result, err) +} + +func (a *DefaultKeyedAggregator) BatchAdd(results map[string]AggregatorResErr) error { + a.mu.Lock() + defer a.mu.Unlock() + + for _, res := range results { + err := a.add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *DefaultKeyedAggregator) addWithKey(key string, result interface{}, err error) error { + if err != nil && a.firstErr == nil { + a.firstErr = err + return nil + } + if err == nil { + a.results[key] = result + } + return nil +} + +func (a *DefaultKeyedAggregator) AddWithKey(key string, result interface{}, err error) error { + a.mu.Lock() + defer a.mu.Unlock() + + return a.addWithKey(key, result, err) +} + +func (a *DefaultKeyedAggregator) BatchAddWithKeyOrder(results map[string]AggregatorResErr, keyOrder []string) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.keyOrder = keyOrder + for key, res := range results { + err := a.addWithKey(key, res.Result, res.Err) + if err != nil { + return nil + } 
+ + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *DefaultKeyedAggregator) SetKeyOrder(keyOrder []string) { + a.mu.Lock() + defer a.mu.Unlock() + a.keyOrder = keyOrder +} + +func (a *DefaultKeyedAggregator) BatchSlice(results []AggregatorResErr) error { + a.mu.Lock() + defer a.mu.Unlock() + + for _, res := range results { + err := a.add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *DefaultKeyedAggregator) Result() (interface{}, error) { + a.mu.Lock() + defer a.mu.Unlock() + + if a.firstErr != nil { + return nil, a.firstErr + } + + // If no explicit key order is set, return results in any order + if len(a.keyOrder) == 0 { + orderedResults := make([]interface{}, 0, len(a.results)) + for _, result := range a.results { + orderedResults = append(orderedResults, result) + } + return orderedResults, nil + } + + // Return results in the exact key order + orderedResults := make([]interface{}, len(a.keyOrder)) + for i, key := range a.keyOrder { + if result, exists := a.results[key]; exists { + orderedResults[i] = result + } + } + return orderedResults, nil +} + +// SpecialAggregator provides a registry for command-specific aggregation logic. 
+type SpecialAggregator struct { + mu sync.Mutex + aggregatorFunc func([]interface{}, []error) (interface{}, error) + results []interface{} + errors []error +} + +func (a *SpecialAggregator) add(result interface{}, err error) error { + a.results = append(a.results, result) + a.errors = append(a.errors, err) + return nil +} + +func (a *SpecialAggregator) Add(result interface{}, err error) error { + a.mu.Lock() + defer a.mu.Unlock() + + return a.add(result, err) +} + +func (a *SpecialAggregator) BatchAdd(results map[string]AggregatorResErr) error { + a.mu.Lock() + defer a.mu.Unlock() + + for _, res := range results { + err := a.add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *SpecialAggregator) AddWithKey(key string, result interface{}, err error) error { + return a.Add(result, err) +} + +func (a *SpecialAggregator) BatchSlice(results []AggregatorResErr) error { + a.mu.Lock() + defer a.mu.Unlock() + + for _, res := range results { + err := a.add(res.Result, res.Err) + if err != nil { + return err + } + + if res.Err != nil { + return nil + } + } + + return nil +} + +func (a *SpecialAggregator) Result() (interface{}, error) { + a.mu.Lock() + defer a.mu.Unlock() + + if a.aggregatorFunc != nil { + return a.aggregatorFunc(a.results, a.errors) + } + // Default behavior: return first non-error result or first error + for i, err := range a.errors { + if err == nil { + return a.results[i], nil + } + } + if len(a.errors) > 0 { + return nil, a.errors[0] + } + return nil, nil +} + +// SpecialAggregatorRegistry holds custom aggregation functions for specific commands. +var SpecialAggregatorRegistry = make(map[string]func([]interface{}, []error) (interface{}, error)) + +// RegisterSpecialAggregator registers a custom aggregation function for a command. 
+func RegisterSpecialAggregator(cmdName string, fn func([]interface{}, []error) (interface{}, error)) { + SpecialAggregatorRegistry[cmdName] = fn +} + +// NewSpecialAggregator creates a special aggregator with command-specific logic if available. +func NewSpecialAggregator(cmdName string) *SpecialAggregator { + agg := &SpecialAggregator{} + if fn, exists := SpecialAggregatorRegistry[cmdName]; exists { + agg.aggregatorFunc = fn + } + return agg +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/routing/policy.go b/vendor/github.com/redis/go-redis/v9/internal/routing/policy.go new file mode 100644 index 00000000..7f784b50 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/routing/policy.go @@ -0,0 +1,144 @@ +package routing + +import ( + "fmt" + "strings" +) + +type RequestPolicy uint8 + +const ( + ReqDefault RequestPolicy = iota + + ReqAllNodes + + ReqAllShards + + ReqMultiShard + + ReqSpecial +) + +const ( + ReadOnlyCMD string = "readonly" +) + +func (p RequestPolicy) String() string { + switch p { + case ReqDefault: + return "default" + case ReqAllNodes: + return "all_nodes" + case ReqAllShards: + return "all_shards" + case ReqMultiShard: + return "multi_shard" + case ReqSpecial: + return "special" + default: + return fmt.Sprintf("unknown_request_policy(%d)", p) + } +} + +func ParseRequestPolicy(raw string) (RequestPolicy, error) { + switch strings.ToLower(raw) { + case "", "default", "none": + return ReqDefault, nil + case "all_nodes": + return ReqAllNodes, nil + case "all_shards": + return ReqAllShards, nil + case "multi_shard": + return ReqMultiShard, nil + case "special": + return ReqSpecial, nil + default: + return ReqDefault, fmt.Errorf("routing: unknown request_policy %q", raw) + } +} + +type ResponsePolicy uint8 + +const ( + RespDefaultKeyless ResponsePolicy = iota + RespDefaultHashSlot + RespAllSucceeded + RespOneSucceeded + RespAggSum + RespAggMin + RespAggMax + RespAggLogicalAnd + RespAggLogicalOr + RespSpecial +) + +func (p 
ResponsePolicy) String() string { + switch p { + case RespDefaultKeyless: + return "default(keyless)" + case RespDefaultHashSlot: + return "default(hashslot)" + case RespAllSucceeded: + return "all_succeeded" + case RespOneSucceeded: + return "one_succeeded" + case RespAggSum: + return "agg_sum" + case RespAggMin: + return "agg_min" + case RespAggMax: + return "agg_max" + case RespAggLogicalAnd: + return "agg_logical_and" + case RespAggLogicalOr: + return "agg_logical_or" + case RespSpecial: + return "special" + default: + return "all_succeeded" + } +} + +func ParseResponsePolicy(raw string) (ResponsePolicy, error) { + switch strings.ToLower(raw) { + case "default(keyless)": + return RespDefaultKeyless, nil + case "default(hashslot)": + return RespDefaultHashSlot, nil + case "all_succeeded": + return RespAllSucceeded, nil + case "one_succeeded": + return RespOneSucceeded, nil + case "agg_sum": + return RespAggSum, nil + case "agg_min": + return RespAggMin, nil + case "agg_max": + return RespAggMax, nil + case "agg_logical_and": + return RespAggLogicalAnd, nil + case "agg_logical_or": + return RespAggLogicalOr, nil + case "special": + return RespSpecial, nil + default: + return RespDefaultKeyless, fmt.Errorf("routing: unknown response_policy %q", raw) + } +} + +type CommandPolicy struct { + Request RequestPolicy + Response ResponsePolicy + // Tips that are not request_policy or response_policy + // e.g nondeterministic_output, nondeterministic_output_order. 
+ Tips map[string]string +} + +func (p *CommandPolicy) CanBeUsedInPipeline() bool { + return p.Request != ReqAllNodes && p.Request != ReqAllShards && p.Request != ReqMultiShard +} + +func (p *CommandPolicy) IsReadOnly() bool { + _, readOnly := p.Tips[ReadOnlyCMD] + return readOnly +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/routing/shard_picker.go b/vendor/github.com/redis/go-redis/v9/internal/routing/shard_picker.go new file mode 100644 index 00000000..8e6228dd --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/routing/shard_picker.go @@ -0,0 +1,57 @@ +package routing + +import ( + "math/rand" + "sync/atomic" +) + +// ShardPicker chooses “one arbitrary shard” when the request_policy is +// ReqDefault and the command has no keys. +type ShardPicker interface { + Next(total int) int // returns an index in [0,total) +} + +// StaticShardPicker always returns the same shard index. +type StaticShardPicker struct { + index int +} + +func NewStaticShardPicker(index int) *StaticShardPicker { + return &StaticShardPicker{index: index} +} + +func (p *StaticShardPicker) Next(total int) int { + if total == 0 || p.index >= total { + return 0 + } + return p.index +} + +/*─────────────────────────────── + Round-robin (default) +────────────────────────────────*/ + +type RoundRobinPicker struct { + cnt atomic.Uint32 +} + +func (p *RoundRobinPicker) Next(total int) int { + if total == 0 { + return 0 + } + i := p.cnt.Add(1) + return int(i-1) % total +} + +/*─────────────────────────────── + Random +────────────────────────────────*/ + +type RandomPicker struct{} + +func (RandomPicker) Next(total int) int { + if total == 0 { + return 0 + } + return rand.Intn(total) +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/semaphore.go b/vendor/github.com/redis/go-redis/v9/internal/semaphore.go new file mode 100644 index 00000000..a7f40466 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/semaphore.go @@ -0,0 +1,193 @@ +package internal + 
// semTimers pools timers for the semaphores' timed-wait path so that waiting
// does not allocate a fresh timer per call. Pooled timers are always stopped.
var semTimers = sync.Pool{
	New: func() interface{} {
		t := time.NewTimer(time.Hour)
		t.Stop()
		return t
	},
}

// tokenSemaphore holds the channel mechanics shared by FastSemaphore and
// FIFOSemaphore. The channel is pre-filled with tokens: Acquire = receive,
// Release = send. Closing the channel unblocks all waiting goroutines (a
// receive on a closed channel succeeds immediately).
type tokenSemaphore struct {
	tokens chan struct{}
	max    int32
}

// newTokenSemaphore builds the shared state with capacity tokens available.
func newTokenSemaphore(capacity int32) tokenSemaphore {
	ch := make(chan struct{}, capacity)
	// Pre-fill with tokens.
	for i := int32(0); i < capacity; i++ {
		ch <- struct{}{}
	}
	return tokenSemaphore{tokens: ch, max: capacity}
}

// TryAcquire attempts to acquire a token without blocking.
// Returns true if successful, false if no tokens available.
func (s *tokenSemaphore) TryAcquire() bool {
	select {
	case <-s.tokens:
		return true
	default:
		return false
	}
}

// AcquireBlocking acquires a token, blocking indefinitely until one is available.
func (s *tokenSemaphore) AcquireBlocking() {
	<-s.tokens
}

// Release releases a token back to the semaphore.
// Calling Release after Close panics (send on closed channel).
func (s *tokenSemaphore) Release() {
	s.tokens <- struct{}{}
}

// Close closes the semaphore, unblocking all waiting goroutines.
// After close, all Acquire calls will receive a closed channel signal.
func (s *tokenSemaphore) Close() {
	close(s.tokens)
}

// Len returns the current number of acquired tokens.
func (s *tokenSemaphore) Len() int32 {
	return s.max - int32(len(s.tokens))
}

// acquireWithTimer waits for a token using a pooled timer, honoring both the
// context and the timeout. The timer is stopped and drained before being
// returned to the pool.
func (s *tokenSemaphore) acquireWithTimer(ctx context.Context, timeout time.Duration, timeoutErr error) error {
	timer := semTimers.Get().(*time.Timer)
	defer semTimers.Put(timer)
	timer.Reset(timeout)

	select {
	case <-s.tokens:
		if !timer.Stop() {
			<-timer.C
		}
		return nil
	case <-ctx.Done():
		if !timer.Stop() {
			<-timer.C
		}
		return ctx.Err()
	case <-timer.C:
		return timeoutErr
	}
}

// FastSemaphore is a channel-based semaphore optimized for performance.
// It uses a fast path that avoids timer allocation when tokens are available.
//
// Fairness: eventual fairness (no starvation) but not strict FIFO.
type FastSemaphore struct {
	tokenSemaphore
}

// NewFastSemaphore creates a new fast semaphore with the given capacity.
func NewFastSemaphore(capacity int32) *FastSemaphore {
	return &FastSemaphore{tokenSemaphore: newTokenSemaphore(capacity)}
}

// Acquire acquires a token, blocking if necessary until one is available.
// Returns an error if the context is cancelled or the timeout expires.
// Uses a fast path to avoid timer allocation when tokens are immediately available.
func (s *FastSemaphore) Acquire(ctx context.Context, timeout time.Duration, timeoutErr error) error {
	// Check context first.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	// Fast path: grab an available token without touching a timer.
	select {
	case <-s.tokens:
		return nil
	default:
	}

	// Slow path: wait with a pooled timer.
	return s.acquireWithTimer(ctx, timeout, timeoutErr)
}

// FIFOSemaphore is a channel-based semaphore with strict FIFO ordering.
// Unlike FastSemaphore, waiters are served in the exact order they call
// Acquire (guaranteed by the Go runtime's channel wait queue); the price is
// that every Acquire goes through the timer path.
type FIFOSemaphore struct {
	tokenSemaphore
}

// NewFIFOSemaphore creates a new FIFO semaphore with the given capacity.
func NewFIFOSemaphore(capacity int32) *FIFOSemaphore {
	return &FIFOSemaphore{tokenSemaphore: newTokenSemaphore(capacity)}
}

// Acquire acquires a token, blocking if necessary until one is available.
// Returns an error if the context is cancelled or the timeout expires.
// Always uses the timer path (no fast path) to guarantee FIFO ordering.
func (s *FIFOSemaphore) Acquire(ctx context.Context, timeout time.Duration, timeoutErr error) error {
	return s.acquireWithTimer(ctx, timeout, timeoutErr)
}

// ReplaceSpaces replaces every space in s with a dash.
func ReplaceSpaces(s string) string {
	return strings.ReplaceAll(s, " ", "-")
}

// GetAddr normalizes a "host:port" address. Addresses without a colon yield
// "". Addresses containing a '.' or starting with '[' are returned verbatim;
// anything else is re-joined via net.JoinHostPort (which brackets bare IPv6
// hosts). NOTE(review): an unbracketed IPv6 address without a dot is split at
// its last colon — presumably callers never pass that form; verify upstream.
func GetAddr(addr string) string {
	ind := strings.LastIndexByte(addr, ':')
	if ind == -1 {
		return ""
	}

	if strings.IndexByte(addr, '.') != -1 {
		return addr
	}

	if addr[0] == '[' {
		return addr
	}
	return net.JoinHostPort(addr[:ind], addr[ind+1:])
}

// ToInteger best-effort converts a reply value to int; unparseable or
// unsupported values yield 0 (errors are deliberately ignored).
func ToInteger(val interface{}) int {
	switch v := val.(type) {
	case int:
		return v
	case int64:
		return int(v)
	case string:
		i, _ := strconv.Atoi(v)
		return i
	default:
		return 0
	}
}

// ToFloat best-effort converts a reply value to float64; unparseable or
// unsupported values yield 0 (errors are deliberately ignored).
func ToFloat(val interface{}) float64 {
	switch v := val.(type) {
	case float64:
		return v
	case string:
		f, _ := strconv.ParseFloat(v, 64)
		return f
	default:
		return 0.0
	}
}

// ToString returns val if it is a string, otherwise "".
func ToString(val interface{}) string {
	if str, ok := val.(string); ok {
		return str
	}
	return ""
}

// ToStringSlice converts a []interface{} reply to []string (non-string
// elements become ""); any other input yields nil.
func ToStringSlice(val interface{}) []string {
	if arr, ok := val.([]interface{}); ok {
		result := make([]string, len(arr))
		for i, v := range arr {
			result[i] = ToString(v)
		}
		return result
	}
	return nil
}
/*
© 2023–present Harald Rudell (https://haraldrudell.github.io/haraldrudell/)
ISC License

Modified by htemelski-redis
Removed the threshold, adapted it to work with float64
*/

// atomicFloat64 is a lock-free float64 backed by the standard library's
// atomic.Uint64: the value is stored as its IEEE-754 bit pattern. It replaces
// the go.uber.org/atomic dependency, which was used here only for its
// Float64 type (that implementation also compares bit patterns, so
// CompareAndSwap semantics are unchanged).
type atomicFloat64 struct {
	bits atomic.Uint64
}

// Load returns the current value.
func (f *atomicFloat64) Load() float64 {
	return math.Float64frombits(f.bits.Load())
}

// Store sets the current value.
func (f *atomicFloat64) Store(v float64) {
	f.bits.Store(math.Float64bits(v))
}

// CompareAndSwap swaps old for new if the stored bit pattern equals old's.
func (f *atomicFloat64) CompareAndSwap(old, new float64) bool {
	return f.bits.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
}

// AtomicMax is a thread-safe max container
//   - hasValue indicator true once Value has been invoked
//   - wait-free CompareAndSwap mechanic
type AtomicMax struct {
	// value is current max
	value atomicFloat64
	// whether [AtomicMax.Value] has been invoked
	hasValue atomic.Bool
}

// NewAtomicMax returns a thread-safe max container seeded with
// -math.MaxFloat64 so that any real observation becomes the max.
func NewAtomicMax() (atomicMax *AtomicMax) {
	m := AtomicMax{}
	m.value.Store(-math.MaxFloat64)
	return &m
}

// Value updates the container with a possible max value
//   - isNewMax is true if value is a new max (or the first sentinel write)
//   - upon return, Max and Max1 are guaranteed to reflect the invocation
//   - the return order of concurrent Value invocations is not guaranteed
//   - Thread-safe
func (m *AtomicMax) Value(value float64) (isNewMax bool) {
	// -math.MaxFloat64 (the seed) as max case
	var hasValue0 = m.hasValue.Load()
	if value == -math.MaxFloat64 {
		if !hasValue0 {
			isNewMax = m.hasValue.CompareAndSwap(false, true)
		}
		return // sentinel as max: isNewMax true for first such writer
	}

	// check against present value
	var current = m.value.Load()
	if isNewMax = value > current; !isNewMax {
		return // not a new max return: isNewMax false
	}

	// store the new max
	for {
		// try to write value to *max
		if isNewMax = m.value.CompareAndSwap(current, value); isNewMax {
			if !hasValue0 {
				// may be rarely written multiple times;
				// still faster than CompareAndSwap
				m.hasValue.Store(true)
			}
			return // new max written return: isNewMax true
		}
		if current = m.value.Load(); current >= value {
			return // no longer a need to write return: isNewMax false
		}
	}
}

// Max returns current max and value-present flag
//   - hasValue true indicates that value reflects a Value invocation
//   - hasValue false: value is zero-value
//   - Thread-safe
func (m *AtomicMax) Max() (value float64, hasValue bool) {
	if hasValue = m.hasValue.Load(); !hasValue {
		return
	}
	value = m.value.Load()
	return
}

// Max1 returns current maximum whether seed value or set by Value
//   - Thread-safe
func (m *AtomicMax) Max1() (value float64) { return m.value.Load() }

/*
© 2023–present Harald Rudell (https://haraldrudell.github.io/haraldrudell/)
ISC License

Modified by htemelski-redis
Adapted from the modified atomic_max, but with inverted logic
*/

// AtomicMin is a thread-safe min container
//   - hasValue indicator true once Value has been invoked
//   - wait-free CompareAndSwap mechanic
type AtomicMin struct {
	// value is current min
	value atomicFloat64
	// whether [AtomicMin.Value] has been invoked
	hasValue atomic.Bool
}

// NewAtomicMin returns a thread-safe min container seeded with
// math.MaxFloat64 so that any real observation becomes the min.
func NewAtomicMin() (atomicMin *AtomicMin) {
	m := AtomicMin{}
	m.value.Store(math.MaxFloat64)
	return &m
}

// Value updates the container with a possible min value
//   - isNewMin is true if value is a new min (or the first sentinel write)
//   - upon return, Min and Min1 are guaranteed to reflect the invocation
//   - the return order of concurrent Value invocations is not guaranteed
//   - Thread-safe
func (m *AtomicMin) Value(value float64) (isNewMin bool) {
	// math.MaxFloat64 (the seed) as min case
	var hasValue0 = m.hasValue.Load()
	if value == math.MaxFloat64 {
		if !hasValue0 {
			isNewMin = m.hasValue.CompareAndSwap(false, true)
		}
		return // sentinel as min: isNewMin true for first such writer
	}

	// check against present value
	var current = m.value.Load()
	if isNewMin = value < current; !isNewMin {
		return // not a new min return: isNewMin false
	}

	// store the new min
	for {
		// try to write value to *min
		if isNewMin = m.value.CompareAndSwap(current, value); isNewMin {
			if !hasValue0 {
				// may be rarely written multiple times;
				// still faster than CompareAndSwap
				m.hasValue.Store(true)
			}
			return // new min written return: isNewMin true
		}
		if current = m.value.Load(); current <= value {
			return // no longer a need to write return: isNewMin false
		}
	}
}

// Min returns current min and value-present flag
//   - hasValue true indicates that value reflects a Value invocation
//   - hasValue false: value is zero-value
//   - Thread-safe
func (m *AtomicMin) Min() (value float64, hasValue bool) {
	if hasValue = m.hasValue.Load(); !hasValue {
		return
	}
	value = m.value.Load()
	return
}

// Min1 returns current minimum whether seed value or set by Value
//   - Thread-safe
func (m *AtomicMin) Min1() (value float64) { return m.value.Load() }
// ParseStringToFloat parses a Redis RESP3 float reply into a Go float64,
// mapping the special tokens "inf", "-inf", "nan" and "-nan" to their
// IEEE-754 counterparts before falling back to strconv.ParseFloat.
func ParseStringToFloat(s string) (float64, error) {
	if s == "inf" {
		return math.Inf(1), nil
	}
	if s == "-inf" {
		return math.Inf(-1), nil
	}
	if s == "nan" || s == "-nan" {
		return math.NaN(), nil
	}
	return strconv.ParseFloat(s, 64)
}

// MustParseFloat is like ParseStringToFloat but panics on parse errors.
func MustParseFloat(s string) float64 {
	f, err := ParseStringToFloat(s)
	if err == nil {
		return f
	}
	panic(fmt.Sprintf("redis: failed to parse float %q: %v", s, err))
}

// SafeIntToInt32 safely converts an int to int32, returning an error if
// overflow would occur.
func SafeIntToInt32(value int, fieldName string) (int32, error) {
	switch {
	case value > math.MaxInt32:
		return 0, fmt.Errorf("redis: %s value %d exceeds maximum allowed value %d", fieldName, value, math.MaxInt32)
	case value < math.MinInt32:
		return 0, fmt.Errorf("redis: %s value %d is below minimum allowed value %d", fieldName, value, math.MinInt32)
	default:
		return int32(value), nil
	}
}

// BytesToString converts byte slice to string without copying; the caller
// must not mutate b afterwards.
func BytesToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}
func StringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) + return unsafe.Slice(unsafe.StringData(s), len(s)) } diff --git a/vendor/github.com/redis/go-redis/v9/json.go b/vendor/github.com/redis/go-redis/v9/json.go index ca731db3..781cc468 100644 --- a/vendor/github.com/redis/go-redis/v9/json.go +++ b/vendor/github.com/redis/go-redis/v9/json.go @@ -60,7 +60,7 @@ type JSONArrTrimArgs struct { type JSONCmd struct { baseCmd val string - expanded []interface{} + expanded interface{} } var _ Cmder = (*JSONCmd)(nil) @@ -68,8 +68,9 @@ var _ Cmder = (*JSONCmd)(nil) func newJSONCmd(ctx context.Context, args ...interface{}) *JSONCmd { return &JSONCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeJSON, }, } } @@ -82,6 +83,7 @@ func (cmd *JSONCmd) SetVal(val string) { cmd.val = val } +// Val returns the result of the JSON.GET command as a string. func (cmd *JSONCmd) Val() string { if len(cmd.val) == 0 && cmd.expanded != nil { val, err := json.Marshal(cmd.expanded) @@ -100,11 +102,12 @@ func (cmd *JSONCmd) Result() (string, error) { return cmd.Val(), cmd.Err() } -func (cmd JSONCmd) Expanded() (interface{}, error) { +// Expanded returns the result of the JSON.GET command as unmarshalled JSON. 
+func (cmd *JSONCmd) Expanded() (interface{}, error) { if len(cmd.val) != 0 && cmd.expanded == nil { err := json.Unmarshal([]byte(cmd.val), &cmd.expanded) if err != nil { - return "", err + return nil, err } } @@ -113,11 +116,17 @@ func (cmd JSONCmd) Expanded() (interface{}, error) { func (cmd *JSONCmd) readReply(rd *proto.Reader) error { // nil response from JSON.(M)GET (cmd.baseCmd.err will be "redis: nil") + // This happens when the key doesn't exist if cmd.baseCmd.Err() == Nil { cmd.val = "" return Nil } + // Handle other base command errors + if cmd.baseCmd.Err() != nil { + return cmd.baseCmd.Err() + } + if readType, err := rd.PeekReplyType(); err != nil { return err } else if readType == proto.RespArray { @@ -127,6 +136,13 @@ func (cmd *JSONCmd) readReply(rd *proto.Reader) error { return err } + // Empty array means no results found for JSON path, but key exists + // This should return "[]", not an error + if size == 0 { + cmd.val = "[]" + return nil + } + expanded := make([]interface{}, size) for i := 0; i < size; i++ { @@ -141,6 +157,7 @@ func (cmd *JSONCmd) readReply(rd *proto.Reader) error { return err } else if str == "" || err == Nil { cmd.val = "" + return Nil } else { cmd.val = str } @@ -149,6 +166,14 @@ func (cmd *JSONCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *JSONCmd) Clone() Cmder { + return &JSONCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, + expanded: cmd.expanded, // interface{} can be shared as it should be immutable after parsing + } +} + // ------------------------------------------- type JSONSliceCmd struct { @@ -159,8 +184,9 @@ type JSONSliceCmd struct { func NewJSONSliceCmd(ctx context.Context, args ...interface{}) *JSONSliceCmd { return &JSONSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeJSONSlice, }, } } @@ -217,6 +243,18 @@ func (cmd *JSONSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *JSONSliceCmd) Clone() Cmder { + var val 
[]interface{} + if cmd.val != nil { + val = make([]interface{}, len(cmd.val)) + copy(val, cmd.val) + } + return &JSONSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + /******************************************************************************* * * IntPointerSliceCmd @@ -233,8 +271,9 @@ type IntPointerSliceCmd struct { func NewIntPointerSliceCmd(ctx context.Context, args ...interface{}) *IntPointerSliceCmd { return &IntPointerSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeIntPointerSlice, }, } } @@ -274,6 +313,18 @@ func (cmd *IntPointerSliceCmd) readReply(rd *proto.Reader) error { return nil } +func (cmd *IntPointerSliceCmd) Clone() Cmder { + var val []*int64 + if cmd.val != nil { + val = make([]*int64, len(cmd.val)) + copy(val, cmd.val) + } + return &IntPointerSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + //------------------------------------------------------------------------------ // JSONArrAppend adds the provided JSON values to the end of the array at the given path. @@ -494,7 +545,7 @@ func (c cmdable) JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd } // JSONNumIncrBy increments the number value stored at the specified path by the provided number. -// For more information, see https://redis.io/commands/json.numincreby +// For more information, see https://redis.io/docs/latest/commands/json.numincrby/ func (c cmdable) JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd { args := []interface{}{"JSON.NUMINCRBY", key, path, value} cmd := newJSONCmd(ctx, args...) 
diff --git a/vendor/github.com/redis/go-redis/v9/list_commands.go b/vendor/github.com/redis/go-redis/v9/list_commands.go index 24a0de08..9d9e16c6 100644 --- a/vendor/github.com/redis/go-redis/v9/list_commands.go +++ b/vendor/github.com/redis/go-redis/v9/list_commands.go @@ -77,6 +77,10 @@ func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...strin return cmd } +// BRPopLPush pops an element from a list, pushes it to another list and returns it. +// Blocks until an element is available or timeout is reached. +// +// Deprecated: Use BLMove with RIGHT and LEFT arguments instead as of Redis 6.2.0. func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { cmd := NewStringCmd( ctx, @@ -247,6 +251,10 @@ func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSl return cmd } +// RPopLPush atomically returns and removes the last element of the source list, +// and pushes the element as the first element of the destination list. +// +// Deprecated: Use LMove with RIGHT and LEFT arguments instead as of Redis 6.2.0. func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { cmd := NewStringCmd(ctx, "rpoplpush", source, destination) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md b/vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md new file mode 100644 index 00000000..03bbd391 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/FEATURES.md @@ -0,0 +1,235 @@ +# Maintenance Notifications - FEATURES + +## Overview + +The Maintenance Notifications feature enables seamless Redis connection handoffs during cluster maintenance operations without dropping active connections. This feature leverages Redis RESP3 push notifications to provide zero-downtime maintenance for Redis Enterprise and compatible Redis deployments. 
+ +## Important + +Using Maintenance Notifications may affect the read and write timeouts by relaxing them during maintenance operations. +This is necessary to prevent false failures due to increased latency during handoffs. The relaxed timeouts are automatically applied and removed as needed. + +## Key Features + +### Seamless Connection Handoffs +- **Zero-Downtime Maintenance**: Automatically handles connection transitions during cluster operations +- **Active Operation Preservation**: Transfers in-flight operations to new connections without interruption +- **Graceful Degradation**: Falls back to standard reconnection if handoff fails + +### Push Notification Support +Supports all Redis Enterprise maintenance notification types: +- **MOVING** - Slot moving to a new node +- **MIGRATING** - Slot in migration state +- **MIGRATED** - Migration completed +- **FAILING_OVER** - Node failing over +- **FAILED_OVER** - Failover completed + +### Circuit Breaker Pattern +- **Endpoint-Specific Failure Tracking**: Prevents repeated connection attempts to failing endpoints +- **Automatic Recovery Testing**: Half-open state allows gradual recovery validation +- **Configurable Thresholds**: Customize failure thresholds and reset timeouts + +### Flexible Configuration +- **Auto-Detection Mode**: Automatically detects server support for maintenance notifications +- **Multiple Endpoint Types**: Support for internal/external IP/FQDN endpoint resolution +- **Auto-Scaling Workers**: Automatically sizes worker pool based on connection pool size +- **Timeout Management**: Separate timeouts for relaxed (during maintenance) and normal operations + +### Extensible Hook System +- **Pre/Post Processing Hooks**: Monitor and customize notification handling +- **Built-in Hooks**: Logging and metrics collection hooks included +- **Custom Hook Support**: Implement custom business logic around maintenance events + +### Comprehensive Monitoring +- **Metrics Collection**: Track notification counts, 
processing times, and error rates +- **Circuit Breaker Stats**: Monitor endpoint health and circuit breaker states +- **Operation Tracking**: Track active handoff operations and their lifecycle + +## Architecture Highlights + +### Event-Driven Handoff System +- **Asynchronous Processing**: Non-blocking handoff operations using worker pool pattern +- **Queue-Based Architecture**: Configurable queue size with auto-scaling support +- **Retry Mechanism**: Configurable retry attempts with exponential backoff + +### Connection Pool Integration +- **Pool Hook Interface**: Seamless integration with go-redis connection pool +- **Connection State Management**: Atomic flags for connection usability tracking +- **Graceful Shutdown**: Ensures all in-flight handoffs complete before shutdown + +### Thread-Safe Design +- **Lock-Free Operations**: Atomic operations for high-performance state tracking +- **Concurrent-Safe Maps**: sync.Map for tracking active operations +- **Minimal Lock Contention**: Read-write locks only where necessary + +## Configuration Options + +### Operation Modes +- **`ModeDisabled`**: Maintenance notifications completely disabled +- **`ModeEnabled`**: Forcefully enabled (fails if server doesn't support) +- **`ModeAuto`**: Auto-detect server support (recommended default) + +### Endpoint Types +- **`EndpointTypeAuto`**: Auto-detect based on current connection +- **`EndpointTypeInternalIP`**: Use internal IP addresses +- **`EndpointTypeInternalFQDN`**: Use internal fully qualified domain names +- **`EndpointTypeExternalIP`**: Use external IP addresses +- **`EndpointTypeExternalFQDN`**: Use external fully qualified domain names +- **`EndpointTypeNone`**: No endpoint (reconnect with current configuration) + +### Timeout Configuration +- **`RelaxedTimeout`**: Extended timeout during maintenance operations (default: 10s) +- **`HandoffTimeout`**: Maximum time for handoff completion (default: 15s) +- **`PostHandoffRelaxedDuration`**: Relaxed period after handoff 
(default: 2×RelaxedTimeout) + +### Worker Pool Configuration +- **`MaxWorkers`**: Maximum concurrent handoff workers (auto-calculated if 0) +- **`HandoffQueueSize`**: Handoff queue capacity (auto-calculated if 0) +- **`MaxHandoffRetries`**: Maximum retry attempts for failed handoffs (default: 3) + +### Circuit Breaker Configuration +- **`CircuitBreakerFailureThreshold`**: Failures before opening circuit (default: 5) +- **`CircuitBreakerResetTimeout`**: Time before testing recovery (default: 60s) +- **`CircuitBreakerMaxRequests`**: Max requests in half-open state (default: 3) + +## Auto-Scaling Formulas + +### Worker Pool Sizing +When `MaxWorkers = 0` (auto-calculate): +``` +MaxWorkers = min(PoolSize/2, max(10, PoolSize/3)) +``` + +### Queue Sizing +When `HandoffQueueSize = 0` (auto-calculate): +``` +QueueSize = max(20 × MaxWorkers, PoolSize) +Capped by: min(MaxActiveConns + 1, 5 × PoolSize) +``` + +### Examples +- **Pool Size 100**: 33 workers, 660 queue (capped at 500) +- **Pool Size 100 + MaxActiveConns 150**: 33 workers, 151 queue +- **Pool Size 50**: 16 workers, 320 queue (capped at 250) + +## Performance Characteristics + +### Throughput +- **Non-Blocking Handoffs**: Client operations continue during handoffs +- **Concurrent Processing**: Multiple handoffs processed in parallel +- **Minimal Overhead**: Lock-free atomic operations for state tracking + +### Latency +- **Relaxed Timeouts**: Extended timeouts during maintenance prevent false failures +- **Fast Path**: Connections not undergoing handoff have zero overhead +- **Graceful Degradation**: Failed handoffs fall back to standard reconnection + +### Resource Usage +- **Memory Efficient**: Bounded queue sizes prevent memory exhaustion +- **Worker Pool**: Fixed worker count prevents goroutine explosion +- **Connection Reuse**: Handoff reuses existing connection objects + +## Testing + +### Unit Tests +- Comprehensive unit test coverage for all components +- Mock-based testing for isolation +- Concurrent 
operation testing + +### Integration Tests +- Pool integration tests with real connection handoffs +- Circuit breaker behavior validation +- Hook system integration testing + +### E2E Tests +- Real Redis Enterprise cluster testing +- Multiple scenario coverage (timeouts, endpoint types, stress tests) +- Fault injection testing +- TLS configuration testing + +## Compatibility + +### Requirements +- **Redis Protocol**: RESP3 required for push notifications +- **Redis Version**: Redis Enterprise or compatible Redis with maintenance notifications +- **Go Version**: Go 1.18+ (uses generics and atomic types) + +### Client Support +#### Currently Supported +- **Standalone Client** (`redis.NewClient`) - Full support for MOVING, MIGRATING, MIGRATED, FAILING_OVER, FAILED_OVER notifications +- **Cluster Client** (`redis.NewClusterClient`) - Support for SMIGRATING and SMIGRATED notifications for hitless slot migrations + +#### Will Not Support +- **Failover Client** (no planned support) +- **Ring Client** (no planned support) + +## Migration Guide + +### Enabling Maintenance Notifications (Standalone Client) + +**Before:** +```go +client := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Protocol: 2, // RESP2 +}) +``` + +**After:** +```go +client := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Protocol: 3, // RESP3 required + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeAuto, + }, +}) +``` + +### Enabling Hitless Upgrades (Cluster Client) + +For Redis Cluster with hitless slot migration support: + +```go +client := redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"}, + Protocol: 3, // RESP3 required for push notifications + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeAuto, + RelaxedTimeout: 10 * time.Second, // Extended timeout during slot migrations + }, +}) +``` + +The cluster client automatically 
handles: +- **SMIGRATING**: Relaxes timeouts when slots are being migrated +- **SMIGRATED**: Triggers lazy cluster state reload when migration completes +- **SeqID Deduplication**: Same notification from multiple nodes triggers only one reload + +### Adding Monitoring + +```go +// Get the manager from the client +manager := client.GetMaintNotificationsManager() +if manager != nil { + // Add logging hook + loggingHook := maintnotifications.NewLoggingHook(2) // Info level + manager.AddNotificationHook(loggingHook) + + // Add metrics hook + metricsHook := maintnotifications.NewMetricsHook() + manager.AddNotificationHook(metricsHook) +} +``` + +## Known Limitations + +1. **RESP3 Required**: Push notifications require RESP3 protocol +2. **Server Support**: Requires Redis Enterprise or compatible Redis with maintenance notifications +3. **Single Connection Commands**: Some commands (MULTI/EXEC, WATCH) may need special handling +4. **No Failover/Ring Client Support**: Failover and Ring clients are not supported and there are no plans to add support + +## Future Enhancements + +- Enhanced metrics and observability +- TTL-based cleanup for SeqID deduplication map \ No newline at end of file diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/README.md b/vendor/github.com/redis/go-redis/v9/maintnotifications/README.md new file mode 100644 index 00000000..2f354ef6 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/README.md @@ -0,0 +1,73 @@ +# Maintenance Notifications + +Seamless Redis connection handoffs during cluster maintenance operations without dropping connections. 
+ +## Cluster Support + +**Cluster notifications are now supported for ClusterClient!** + +- **SMIGRATING**: `["SMIGRATING", SeqID, slot/range, ...]` - Relaxes timeouts when slots are being migrated +- **SMIGRATED**: `["SMIGRATED", SeqID, src host:port, dst host:port, slot/range, ...]` - Reloads cluster state when slot migration completes + +**Note:** Other maintenance notifications (MOVING, MIGRATING, MIGRATED, FAILING_OVER, FAILED_OVER) are supported only in standalone Redis clients. Cluster clients support SMIGRATING and SMIGRATED for cluster-specific slot migration handling. + +## Quick Start + +```go +client := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Protocol: 3, // RESP3 required + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeEnabled, + }, +}) +``` + +## Modes + +- **`ModeDisabled`** - Maintenance notifications disabled +- **`ModeEnabled`** - Forcefully enabled (fails if server doesn't support) +- **`ModeAuto`** - Auto-detect server support (default) + +## Configuration + +```go +&maintnotifications.Config{ + Mode: maintnotifications.ModeAuto, + EndpointType: maintnotifications.EndpointTypeAuto, + RelaxedTimeout: 10 * time.Second, + HandoffTimeout: 15 * time.Second, + MaxHandoffRetries: 3, + MaxWorkers: 0, // Auto-calculated + HandoffQueueSize: 0, // Auto-calculated + PostHandoffRelaxedDuration: 0, // 2 * RelaxedTimeout +} +``` + +### Endpoint Types + +- **`EndpointTypeAuto`** - Auto-detect based on connection (default) +- **`EndpointTypeInternalIP`** - Internal IP address +- **`EndpointTypeInternalFQDN`** - Internal FQDN +- **`EndpointTypeExternalIP`** - External IP address +- **`EndpointTypeExternalFQDN`** - External FQDN +- **`EndpointTypeNone`** - No endpoint (reconnect with current config) + +### Auto-Scaling + +**Workers**: `min(PoolSize/2, max(10, PoolSize/3))` when auto-calculated +**Queue**: `max(20×Workers, PoolSize)` capped by `MaxActiveConns+1` or `5×PoolSize` + +**Examples:** +- Pool 
100: 33 workers, 660 queue (capped at 500) +- Pool 100 + MaxActiveConns 150: 33 workers, 151 queue + +## How It Works + +1. Redis sends push notifications about cluster maintenance operations +2. Client creates new connections to updated endpoints +3. Active operations transfer to new connections +4. Old connections close gracefully + + +## For more information, see [FEATURES](FEATURES.md) diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go new file mode 100644 index 00000000..cb76b644 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/circuit_breaker.go @@ -0,0 +1,353 @@ +package maintnotifications + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" +) + +// CircuitBreakerState represents the state of a circuit breaker +type CircuitBreakerState int32 + +const ( + // CircuitBreakerClosed - normal operation, requests allowed + CircuitBreakerClosed CircuitBreakerState = iota + // CircuitBreakerOpen - failing fast, requests rejected + CircuitBreakerOpen + // CircuitBreakerHalfOpen - testing if service recovered + CircuitBreakerHalfOpen +) + +func (s CircuitBreakerState) String() string { + switch s { + case CircuitBreakerClosed: + return "closed" + case CircuitBreakerOpen: + return "open" + case CircuitBreakerHalfOpen: + return "half-open" + default: + return "unknown" + } +} + +// CircuitBreaker implements the circuit breaker pattern for endpoint-specific failure handling +type CircuitBreaker struct { + // Configuration + failureThreshold int // Number of failures before opening + resetTimeout time.Duration // How long to stay open before testing + maxRequests int // Max requests allowed in half-open state + + // State tracking (atomic for lock-free access) + state atomic.Int32 // CircuitBreakerState + failures 
atomic.Int64 // Current failure count + successes atomic.Int64 // Success count in half-open state + requests atomic.Int64 // Request count in half-open state + lastFailureTime atomic.Int64 // Unix timestamp of last failure + lastSuccessTime atomic.Int64 // Unix timestamp of last success + + // Endpoint identification + endpoint string + config *Config +} + +// newCircuitBreaker creates a new circuit breaker for an endpoint +func newCircuitBreaker(endpoint string, config *Config) *CircuitBreaker { + // Use configuration values with sensible defaults + failureThreshold := 5 + resetTimeout := 60 * time.Second + maxRequests := 3 + + if config != nil { + failureThreshold = config.CircuitBreakerFailureThreshold + resetTimeout = config.CircuitBreakerResetTimeout + maxRequests = config.CircuitBreakerMaxRequests + } + + return &CircuitBreaker{ + failureThreshold: failureThreshold, + resetTimeout: resetTimeout, + maxRequests: maxRequests, + endpoint: endpoint, + config: config, + state: atomic.Int32{}, // Defaults to CircuitBreakerClosed (0) + } +} + +// IsOpen returns true if the circuit breaker is open (rejecting requests) +func (cb *CircuitBreaker) IsOpen() bool { + state := CircuitBreakerState(cb.state.Load()) + return state == CircuitBreakerOpen +} + +// shouldAttemptReset checks if enough time has passed to attempt reset +func (cb *CircuitBreaker) shouldAttemptReset() bool { + lastFailure := time.Unix(cb.lastFailureTime.Load(), 0) + return time.Since(lastFailure) >= cb.resetTimeout +} + +// Execute runs the given function with circuit breaker protection +func (cb *CircuitBreaker) Execute(fn func() error) error { + // Single atomic state load for consistency + state := CircuitBreakerState(cb.state.Load()) + + switch state { + case CircuitBreakerOpen: + if cb.shouldAttemptReset() { + // Attempt transition to half-open + if cb.state.CompareAndSwap(int32(CircuitBreakerOpen), int32(CircuitBreakerHalfOpen)) { + cb.requests.Store(0) + cb.successes.Store(0) + if 
internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerTransitioningToHalfOpen(cb.endpoint)) + } + // Fall through to half-open logic + } else { + return ErrCircuitBreakerOpen + } + } else { + return ErrCircuitBreakerOpen + } + fallthrough + case CircuitBreakerHalfOpen: + requests := cb.requests.Add(1) + if requests > int64(cb.maxRequests) { + cb.requests.Add(-1) // Revert the increment + return ErrCircuitBreakerOpen + } + } + + // Execute the function with consistent state + err := fn() + + if err != nil { + cb.recordFailure() + return err + } + + cb.recordSuccess() + return nil +} + +// recordFailure records a failure and potentially opens the circuit +func (cb *CircuitBreaker) recordFailure() { + cb.lastFailureTime.Store(time.Now().Unix()) + failures := cb.failures.Add(1) + + state := CircuitBreakerState(cb.state.Load()) + + switch state { + case CircuitBreakerClosed: + if failures >= int64(cb.failureThreshold) { + if cb.state.CompareAndSwap(int32(CircuitBreakerClosed), int32(CircuitBreakerOpen)) { + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerOpened(cb.endpoint, failures)) + } + } + } + case CircuitBreakerHalfOpen: + // Any failure in half-open state immediately opens the circuit + if cb.state.CompareAndSwap(int32(CircuitBreakerHalfOpen), int32(CircuitBreakerOpen)) { + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerReopened(cb.endpoint)) + } + } + } +} + +// recordSuccess records a success and potentially closes the circuit +func (cb *CircuitBreaker) recordSuccess() { + cb.lastSuccessTime.Store(time.Now().Unix()) + + state := CircuitBreakerState(cb.state.Load()) + + switch state { + case CircuitBreakerClosed: + // Reset failure count on success in closed state + cb.failures.Store(0) + case CircuitBreakerHalfOpen: + successes := cb.successes.Add(1) + + // If we've had enough successful requests, close the 
circuit + if successes >= int64(cb.maxRequests) { + if cb.state.CompareAndSwap(int32(CircuitBreakerHalfOpen), int32(CircuitBreakerClosed)) { + cb.failures.Store(0) + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerClosed(cb.endpoint, successes)) + } + } + } + } +} + +// GetState returns the current state of the circuit breaker +func (cb *CircuitBreaker) GetState() CircuitBreakerState { + return CircuitBreakerState(cb.state.Load()) +} + +// GetStats returns current statistics for monitoring +func (cb *CircuitBreaker) GetStats() CircuitBreakerStats { + return CircuitBreakerStats{ + Endpoint: cb.endpoint, + State: cb.GetState(), + Failures: cb.failures.Load(), + Successes: cb.successes.Load(), + Requests: cb.requests.Load(), + LastFailureTime: time.Unix(cb.lastFailureTime.Load(), 0), + LastSuccessTime: time.Unix(cb.lastSuccessTime.Load(), 0), + } +} + +// CircuitBreakerStats provides statistics about a circuit breaker +type CircuitBreakerStats struct { + Endpoint string + State CircuitBreakerState + Failures int64 + Successes int64 + Requests int64 + LastFailureTime time.Time + LastSuccessTime time.Time +} + +// CircuitBreakerEntry wraps a circuit breaker with access tracking +type CircuitBreakerEntry struct { + breaker *CircuitBreaker + lastAccess atomic.Int64 // Unix timestamp + created time.Time +} + +// CircuitBreakerManager manages circuit breakers for multiple endpoints +type CircuitBreakerManager struct { + breakers sync.Map // map[string]*CircuitBreakerEntry + config *Config + cleanupStop chan struct{} + cleanupMu sync.Mutex + lastCleanup atomic.Int64 // Unix timestamp +} + +// newCircuitBreakerManager creates a new circuit breaker manager +func newCircuitBreakerManager(config *Config) *CircuitBreakerManager { + cbm := &CircuitBreakerManager{ + config: config, + cleanupStop: make(chan struct{}), + } + cbm.lastCleanup.Store(time.Now().Unix()) + + // Start background cleanup goroutine + go cbm.cleanupLoop() 
+ + return cbm +} + +// GetCircuitBreaker returns the circuit breaker for an endpoint, creating it if necessary +func (cbm *CircuitBreakerManager) GetCircuitBreaker(endpoint string) *CircuitBreaker { + now := time.Now().Unix() + + if entry, ok := cbm.breakers.Load(endpoint); ok { + cbEntry := entry.(*CircuitBreakerEntry) + cbEntry.lastAccess.Store(now) + return cbEntry.breaker + } + + // Create new circuit breaker with metadata + newBreaker := newCircuitBreaker(endpoint, cbm.config) + newEntry := &CircuitBreakerEntry{ + breaker: newBreaker, + created: time.Now(), + } + newEntry.lastAccess.Store(now) + + actual, _ := cbm.breakers.LoadOrStore(endpoint, newEntry) + return actual.(*CircuitBreakerEntry).breaker +} + +// GetAllStats returns statistics for all circuit breakers +func (cbm *CircuitBreakerManager) GetAllStats() []CircuitBreakerStats { + var stats []CircuitBreakerStats + cbm.breakers.Range(func(key, value interface{}) bool { + entry := value.(*CircuitBreakerEntry) + stats = append(stats, entry.breaker.GetStats()) + return true + }) + return stats +} + +// cleanupLoop runs background cleanup of unused circuit breakers +func (cbm *CircuitBreakerManager) cleanupLoop() { + ticker := time.NewTicker(5 * time.Minute) // Cleanup every 5 minutes + defer ticker.Stop() + + for { + select { + case <-ticker.C: + cbm.cleanup() + case <-cbm.cleanupStop: + return + } + } +} + +// cleanup removes circuit breakers that haven't been accessed recently +func (cbm *CircuitBreakerManager) cleanup() { + // Prevent concurrent cleanups + if !cbm.cleanupMu.TryLock() { + return + } + defer cbm.cleanupMu.Unlock() + + now := time.Now() + cutoff := now.Add(-30 * time.Minute).Unix() // 30 minute TTL + + var toDelete []string + count := 0 + + cbm.breakers.Range(func(key, value interface{}) bool { + endpoint := key.(string) + entry := value.(*CircuitBreakerEntry) + + count++ + + // Remove if not accessed recently + if entry.lastAccess.Load() < cutoff { + toDelete = append(toDelete, endpoint) 
+ } + + return true + }) + + // Delete expired entries + for _, endpoint := range toDelete { + cbm.breakers.Delete(endpoint) + } + + // Log cleanup results + if len(toDelete) > 0 && internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.CircuitBreakerCleanup(len(toDelete), count)) + } + + cbm.lastCleanup.Store(now.Unix()) +} + +// Shutdown stops the cleanup goroutine +func (cbm *CircuitBreakerManager) Shutdown() { + close(cbm.cleanupStop) +} + +// Reset resets all circuit breakers (useful for testing) +func (cbm *CircuitBreakerManager) Reset() { + cbm.breakers.Range(func(key, value interface{}) bool { + entry := value.(*CircuitBreakerEntry) + breaker := entry.breaker + breaker.state.Store(int32(CircuitBreakerClosed)) + breaker.failures.Store(0) + breaker.successes.Store(0) + breaker.requests.Store(0) + breaker.lastFailureTime.Store(0) + breaker.lastSuccessTime.Store(0) + return true + }) +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/config.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/config.go new file mode 100644 index 00000000..db666f3a --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/config.go @@ -0,0 +1,457 @@ +package maintnotifications + +import ( + "context" + "net" + "runtime" + "strings" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" +) + +// Mode represents the maintenance notifications mode +type Mode string + +// Constants for maintenance push notifications modes +const ( + ModeDisabled Mode = "disabled" // Client doesn't send CLIENT MAINT_NOTIFICATIONS ON command + ModeEnabled Mode = "enabled" // Client forcefully sends command, interrupts connection on error + ModeAuto Mode = "auto" // Client tries to send command, disables feature on error +) + +// IsValid returns true if the maintenance notifications mode is valid +func (m Mode) IsValid() bool { + switch m { + case ModeDisabled, 
ModeEnabled, ModeAuto: + return true + default: + return false + } +} + +// String returns the string representation of the mode +func (m Mode) String() string { + return string(m) +} + +// EndpointType represents the type of endpoint to request in MOVING notifications +type EndpointType string + +// Constants for endpoint types +const ( + EndpointTypeAuto EndpointType = "auto" // Auto-detect based on connection + EndpointTypeInternalIP EndpointType = "internal-ip" // Internal IP address + EndpointTypeInternalFQDN EndpointType = "internal-fqdn" // Internal FQDN + EndpointTypeExternalIP EndpointType = "external-ip" // External IP address + EndpointTypeExternalFQDN EndpointType = "external-fqdn" // External FQDN + EndpointTypeNone EndpointType = "none" // No endpoint (reconnect with current config) +) + +// IsValid returns true if the endpoint type is valid +func (e EndpointType) IsValid() bool { + switch e { + case EndpointTypeAuto, EndpointTypeInternalIP, EndpointTypeInternalFQDN, + EndpointTypeExternalIP, EndpointTypeExternalFQDN, EndpointTypeNone: + return true + default: + return false + } +} + +// String returns the string representation of the endpoint type +func (e EndpointType) String() string { + return string(e) +} + +// Config provides configuration options for maintenance notifications +type Config struct { + // Mode controls how client maintenance notifications are handled. + // Valid values: ModeDisabled, ModeEnabled, ModeAuto + // Default: ModeAuto + Mode Mode + + // EndpointType specifies the type of endpoint to request in MOVING notifications. + // Valid values: EndpointTypeAuto, EndpointTypeInternalIP, EndpointTypeInternalFQDN, + // EndpointTypeExternalIP, EndpointTypeExternalFQDN, EndpointTypeNone + // Default: EndpointTypeAuto + EndpointType EndpointType + + // RelaxedTimeout is the concrete timeout value to use during + // MIGRATING/FAILING_OVER states to accommodate increased latency. + // This applies to both read and write timeouts. 
+ // Default: 10 seconds + RelaxedTimeout time.Duration + + // HandoffTimeout is the maximum time to wait for connection handoff to complete. + // If handoff takes longer than this, the old connection will be forcibly closed. + // Default: 15 seconds (matches server-side eviction timeout) + HandoffTimeout time.Duration + + // MaxWorkers is the maximum number of worker goroutines for processing handoff requests. + // Workers are created on-demand and automatically cleaned up when idle. + // If zero, defaults to min(10, PoolSize/2) to handle bursts effectively. + // If explicitly set, enforces minimum of PoolSize/2 + // + // Default: min(PoolSize/2, max(10, PoolSize/3)), Minimum when set: PoolSize/2 + MaxWorkers int + + // HandoffQueueSize is the size of the buffered channel used to queue handoff requests. + // If the queue is full, new handoff requests will be rejected. + // Scales with both worker count and pool size for better burst handling. + // + // Default: max(20×MaxWorkers, PoolSize), capped by MaxActiveConns+1 (if set) or 5×PoolSize + // When set: minimum 200, capped by MaxActiveConns+1 (if set) or 5×PoolSize + HandoffQueueSize int + + // PostHandoffRelaxedDuration is how long to keep relaxed timeouts on the new connection + // after a handoff completes. This provides additional resilience during cluster transitions. + // Default: 2 * RelaxedTimeout + PostHandoffRelaxedDuration time.Duration + + // Circuit breaker configuration for endpoint failure handling + // CircuitBreakerFailureThreshold is the number of failures before opening the circuit. + // Default: 5 + CircuitBreakerFailureThreshold int + + // CircuitBreakerResetTimeout is how long to wait before testing if the endpoint recovered. + // Default: 60 seconds + CircuitBreakerResetTimeout time.Duration + + // CircuitBreakerMaxRequests is the maximum number of requests allowed in half-open state. 
+ // Default: 3 + CircuitBreakerMaxRequests int + + // MaxHandoffRetries is the maximum number of times to retry a failed handoff. + // After this many retries, the connection will be removed from the pool. + // Default: 3 + MaxHandoffRetries int +} + +func (c *Config) IsEnabled() bool { + return c != nil && c.Mode != ModeDisabled +} + +// DefaultConfig returns a Config with sensible defaults. +func DefaultConfig() *Config { + return &Config{ + Mode: ModeAuto, // Enable by default for Redis Cloud + EndpointType: EndpointTypeAuto, // Auto-detect based on connection + RelaxedTimeout: 10 * time.Second, + HandoffTimeout: 15 * time.Second, + MaxWorkers: 0, // Auto-calculated based on pool size + HandoffQueueSize: 0, // Auto-calculated based on max workers + PostHandoffRelaxedDuration: 0, // Auto-calculated based on relaxed timeout + + // Circuit breaker configuration + CircuitBreakerFailureThreshold: 5, + CircuitBreakerResetTimeout: 60 * time.Second, + CircuitBreakerMaxRequests: 3, + + // Connection Handoff Configuration + MaxHandoffRetries: 3, + } +} + +// Validate checks if the configuration is valid. 
+func (c *Config) Validate() error { + if c.RelaxedTimeout <= 0 { + return ErrInvalidRelaxedTimeout + } + if c.HandoffTimeout <= 0 { + return ErrInvalidHandoffTimeout + } + // Validate worker configuration + // Allow 0 for auto-calculation, but negative values are invalid + if c.MaxWorkers < 0 { + return ErrInvalidHandoffWorkers + } + // HandoffQueueSize validation - allow 0 for auto-calculation + if c.HandoffQueueSize < 0 { + return ErrInvalidHandoffQueueSize + } + if c.PostHandoffRelaxedDuration < 0 { + return ErrInvalidPostHandoffRelaxedDuration + } + + // Circuit breaker validation + if c.CircuitBreakerFailureThreshold < 1 { + return ErrInvalidCircuitBreakerFailureThreshold + } + if c.CircuitBreakerResetTimeout < 0 { + return ErrInvalidCircuitBreakerResetTimeout + } + if c.CircuitBreakerMaxRequests < 1 { + return ErrInvalidCircuitBreakerMaxRequests + } + + // Validate Mode (maintenance notifications mode) + if !c.Mode.IsValid() { + return ErrInvalidMaintNotifications + } + + // Validate EndpointType + if !c.EndpointType.IsValid() { + return ErrInvalidEndpointType + } + + // Validate configuration fields + if c.MaxHandoffRetries < 1 || c.MaxHandoffRetries > 10 { + return ErrInvalidHandoffRetries + } + + return nil +} + +// ApplyDefaults applies default values to any zero-value fields in the configuration. +// This ensures that partially configured structs get sensible defaults for missing fields. +func (c *Config) ApplyDefaults() *Config { + return c.ApplyDefaultsWithPoolSize(0) +} + +// ApplyDefaultsWithPoolSize applies default values to any zero-value fields in the configuration, +// using the provided pool size to calculate worker defaults. +// This ensures that partially configured structs get sensible defaults for missing fields. 
+func (c *Config) ApplyDefaultsWithPoolSize(poolSize int) *Config { + return c.ApplyDefaultsWithPoolConfig(poolSize, 0) +} + +// ApplyDefaultsWithPoolConfig applies default values to any zero-value fields in the configuration, +// using the provided pool size and max active connections to calculate worker and queue defaults. +// This ensures that partially configured structs get sensible defaults for missing fields. +func (c *Config) ApplyDefaultsWithPoolConfig(poolSize int, maxActiveConns int) *Config { + if c == nil { + return DefaultConfig().ApplyDefaultsWithPoolSize(poolSize) + } + + defaults := DefaultConfig() + result := &Config{} + + // Apply defaults for enum fields (empty/zero means not set) + result.Mode = defaults.Mode + if c.Mode != "" { + result.Mode = c.Mode + } + + result.EndpointType = defaults.EndpointType + if c.EndpointType != "" { + result.EndpointType = c.EndpointType + } + + // Apply defaults for duration fields (zero means not set) + result.RelaxedTimeout = defaults.RelaxedTimeout + if c.RelaxedTimeout > 0 { + result.RelaxedTimeout = c.RelaxedTimeout + } + + result.HandoffTimeout = defaults.HandoffTimeout + if c.HandoffTimeout > 0 { + result.HandoffTimeout = c.HandoffTimeout + } + + // Copy worker configuration + result.MaxWorkers = c.MaxWorkers + + // Apply worker defaults based on pool size + result.applyWorkerDefaults(poolSize) + + // Apply queue size defaults with new scaling approach + // Default: max(20x workers, PoolSize), capped by maxActiveConns or 5x pool size + workerBasedSize := result.MaxWorkers * 20 + poolBasedSize := poolSize + result.HandoffQueueSize = max(workerBasedSize, poolBasedSize) + if c.HandoffQueueSize > 0 { + // When explicitly set: enforce minimum of 200 + result.HandoffQueueSize = max(200, c.HandoffQueueSize) + } + + // Cap queue size: use maxActiveConns+1 if set, otherwise 5x pool size + var queueCap int + if maxActiveConns > 0 { + queueCap = maxActiveConns + 1 + // Ensure queue cap is at least 2 for very small 
maxActiveConns + if queueCap < 2 { + queueCap = 2 + } + } else { + queueCap = poolSize * 5 + } + result.HandoffQueueSize = min(result.HandoffQueueSize, queueCap) + + // Ensure minimum queue size of 2 (fallback for very small pools) + if result.HandoffQueueSize < 2 { + result.HandoffQueueSize = 2 + } + + result.PostHandoffRelaxedDuration = result.RelaxedTimeout * 2 + if c.PostHandoffRelaxedDuration > 0 { + result.PostHandoffRelaxedDuration = c.PostHandoffRelaxedDuration + } + + // Apply defaults for configuration fields + result.MaxHandoffRetries = defaults.MaxHandoffRetries + if c.MaxHandoffRetries > 0 { + result.MaxHandoffRetries = c.MaxHandoffRetries + } + + // Circuit breaker configuration + result.CircuitBreakerFailureThreshold = defaults.CircuitBreakerFailureThreshold + if c.CircuitBreakerFailureThreshold > 0 { + result.CircuitBreakerFailureThreshold = c.CircuitBreakerFailureThreshold + } + + result.CircuitBreakerResetTimeout = defaults.CircuitBreakerResetTimeout + if c.CircuitBreakerResetTimeout > 0 { + result.CircuitBreakerResetTimeout = c.CircuitBreakerResetTimeout + } + + result.CircuitBreakerMaxRequests = defaults.CircuitBreakerMaxRequests + if c.CircuitBreakerMaxRequests > 0 { + result.CircuitBreakerMaxRequests = c.CircuitBreakerMaxRequests + } + + if internal.LogLevel.DebugOrAbove() { + internal.Logger.Printf(context.Background(), logs.DebugLoggingEnabled()) + internal.Logger.Printf(context.Background(), logs.ConfigDebug(result)) + } + return result +} + +// Clone creates a deep copy of the configuration. 
+func (c *Config) Clone() *Config { + if c == nil { + return DefaultConfig() + } + + return &Config{ + Mode: c.Mode, + EndpointType: c.EndpointType, + RelaxedTimeout: c.RelaxedTimeout, + HandoffTimeout: c.HandoffTimeout, + MaxWorkers: c.MaxWorkers, + HandoffQueueSize: c.HandoffQueueSize, + PostHandoffRelaxedDuration: c.PostHandoffRelaxedDuration, + + // Circuit breaker configuration + CircuitBreakerFailureThreshold: c.CircuitBreakerFailureThreshold, + CircuitBreakerResetTimeout: c.CircuitBreakerResetTimeout, + CircuitBreakerMaxRequests: c.CircuitBreakerMaxRequests, + + // Configuration fields + MaxHandoffRetries: c.MaxHandoffRetries, + } +} + +// applyWorkerDefaults calculates and applies worker defaults based on pool size +func (c *Config) applyWorkerDefaults(poolSize int) { + // Calculate defaults based on pool size + if poolSize <= 0 { + poolSize = 10 * runtime.GOMAXPROCS(0) + } + + // When not set: min(poolSize/2, max(10, poolSize/3)) - balanced scaling approach + originalMaxWorkers := c.MaxWorkers + c.MaxWorkers = min(poolSize/2, max(10, poolSize/3)) + if originalMaxWorkers != 0 { + // When explicitly set: max(poolSize/2, set_value) - ensure at least poolSize/2 workers + c.MaxWorkers = max(poolSize/2, originalMaxWorkers) + } + + // Ensure minimum of 1 worker (fallback for very small pools) + if c.MaxWorkers < 1 { + c.MaxWorkers = 1 + } +} + +// DetectEndpointType automatically detects the appropriate endpoint type +// based on the connection address and TLS configuration. 
+// +// For IP addresses: +// - If TLS is enabled: requests FQDN for proper certificate validation +// - If TLS is disabled: requests IP for better performance +// +// For hostnames: +// - If TLS is enabled: always requests FQDN for proper certificate validation +// - If TLS is disabled: requests IP for better performance +// +// Internal vs External detection: +// - For IPs: uses private IP range detection +// - For hostnames: uses heuristics based on common internal naming patterns +func DetectEndpointType(addr string, tlsEnabled bool) EndpointType { + // Extract host from "host:port" format + host, _, err := net.SplitHostPort(addr) + if err != nil { + host = addr // Assume no port + } + + // Check if the host is an IP address or hostname + ip := net.ParseIP(host) + isIPAddress := ip != nil + var endpointType EndpointType + + if isIPAddress { + // Address is an IP - determine if it's private or public + isPrivate := ip.IsPrivate() || ip.IsLoopback() || ip.IsLinkLocalUnicast() + + if tlsEnabled { + // TLS with IP addresses - still prefer FQDN for certificate validation + if isPrivate { + endpointType = EndpointTypeInternalFQDN + } else { + endpointType = EndpointTypeExternalFQDN + } + } else { + // No TLS - can use IP addresses directly + if isPrivate { + endpointType = EndpointTypeInternalIP + } else { + endpointType = EndpointTypeExternalIP + } + } + } else { + // Address is a hostname + isInternalHostname := isInternalHostname(host) + if isInternalHostname { + endpointType = EndpointTypeInternalFQDN + } else { + endpointType = EndpointTypeExternalFQDN + } + } + + return endpointType +} + +// isInternalHostname determines if a hostname appears to be internal/private. +// This is a heuristic based on common naming patterns. 
+func isInternalHostname(hostname string) bool { + // Convert to lowercase for comparison + hostname = strings.ToLower(hostname) + + // Common internal hostname patterns + internalPatterns := []string{ + "localhost", + ".local", + ".internal", + ".corp", + ".lan", + ".intranet", + ".private", + } + + // Check for exact match or suffix match + for _, pattern := range internalPatterns { + if hostname == pattern || strings.HasSuffix(hostname, pattern) { + return true + } + } + + // Check for RFC 1918 style hostnames (e.g., redis-1, db-server, etc.) + // If hostname doesn't contain dots, it's likely internal + if !strings.Contains(hostname, ".") { + return true + } + + // Default to external for fully qualified domain names + return false +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go new file mode 100644 index 00000000..049656bd --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/errors.go @@ -0,0 +1,76 @@ +package maintnotifications + +import ( + "errors" + + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" +) + +// Configuration errors +var ( + ErrInvalidRelaxedTimeout = errors.New(logs.InvalidRelaxedTimeoutError()) + ErrInvalidHandoffTimeout = errors.New(logs.InvalidHandoffTimeoutError()) + ErrInvalidHandoffWorkers = errors.New(logs.InvalidHandoffWorkersError()) + ErrInvalidHandoffQueueSize = errors.New(logs.InvalidHandoffQueueSizeError()) + ErrInvalidPostHandoffRelaxedDuration = errors.New(logs.InvalidPostHandoffRelaxedDurationError()) + ErrInvalidEndpointType = errors.New(logs.InvalidEndpointTypeError()) + ErrInvalidMaintNotifications = errors.New(logs.InvalidMaintNotificationsError()) + ErrMaxHandoffRetriesReached = errors.New(logs.MaxHandoffRetriesReachedError()) + + // Configuration validation errors + + // ErrInvalidHandoffRetries is returned when the number of handoff retries is invalid + ErrInvalidHandoffRetries = 
errors.New(logs.InvalidHandoffRetriesError()) +) + +// Integration errors +var ( + // ErrInvalidClient is returned when the client does not support push notifications + ErrInvalidClient = errors.New(logs.InvalidClientError()) +) + +// Handoff errors +var ( + // ErrHandoffQueueFull is returned when the handoff queue is full + ErrHandoffQueueFull = errors.New(logs.HandoffQueueFullError()) +) + +// Notification errors +var ( + // ErrInvalidNotification is returned when a notification is in an invalid format + ErrInvalidNotification = errors.New(logs.InvalidNotificationError()) +) + +// connection handoff errors +var ( + // ErrConnectionMarkedForHandoff is returned when a connection is marked for handoff + // and should not be used until the handoff is complete + ErrConnectionMarkedForHandoff = errors.New(logs.ConnectionMarkedForHandoffErrorMessage) + // ErrConnectionMarkedForHandoffWithState is returned when a connection is marked for handoff + // and should not be used until the handoff is complete + ErrConnectionMarkedForHandoffWithState = errors.New(logs.ConnectionMarkedForHandoffErrorMessage + " with state") + // ErrConnectionInvalidHandoffState is returned when a connection is in an invalid state for handoff + ErrConnectionInvalidHandoffState = errors.New(logs.ConnectionInvalidHandoffStateErrorMessage) +) + +// shutdown errors +var ( + // ErrShutdown is returned when the maintnotifications manager is shutdown + ErrShutdown = errors.New(logs.ShutdownError()) +) + +// circuit breaker errors +var ( + // ErrCircuitBreakerOpen is returned when the circuit breaker is open + ErrCircuitBreakerOpen = errors.New(logs.CircuitBreakerOpenErrorMessage) +) + +// circuit breaker configuration errors +var ( + // ErrInvalidCircuitBreakerFailureThreshold is returned when the circuit breaker failure threshold is invalid + ErrInvalidCircuitBreakerFailureThreshold = errors.New(logs.InvalidCircuitBreakerFailureThresholdError()) + // ErrInvalidCircuitBreakerResetTimeout is returned when 
the circuit breaker reset timeout is invalid + ErrInvalidCircuitBreakerResetTimeout = errors.New(logs.InvalidCircuitBreakerResetTimeoutError()) + // ErrInvalidCircuitBreakerMaxRequests is returned when the circuit breaker max requests is invalid + ErrInvalidCircuitBreakerMaxRequests = errors.New(logs.InvalidCircuitBreakerMaxRequestsError()) +) diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go new file mode 100644 index 00000000..3a346557 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/example_hooks.go @@ -0,0 +1,101 @@ +package maintnotifications + +import ( + "context" + "fmt" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/push" +) + +// contextKey is a custom type for context keys to avoid collisions +type contextKey string + +const ( + startTimeKey contextKey = "maint_notif_start_time" +) + +// MetricsHook collects metrics about notification processing. +type MetricsHook struct { + NotificationCounts map[string]int64 + ProcessingTimes map[string]time.Duration + ErrorCounts map[string]int64 + HandoffCounts int64 // Total handoffs initiated + HandoffSuccesses int64 // Successful handoffs + HandoffFailures int64 // Failed handoffs +} + +// NewMetricsHook creates a new metrics collection hook. +func NewMetricsHook() *MetricsHook { + return &MetricsHook{ + NotificationCounts: make(map[string]int64), + ProcessingTimes: make(map[string]time.Duration), + ErrorCounts: make(map[string]int64), + } +} + +// PreHook records the start time for processing metrics. 
+func (mh *MetricsHook) PreHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) { + mh.NotificationCounts[notificationType]++ + + // Log connection information if available + if conn, ok := notificationCtx.Conn.(*pool.Conn); ok { + internal.Logger.Printf(ctx, logs.MetricsHookProcessingNotification(notificationType, conn.GetID())) + } + + // Store start time in context for duration calculation + startTime := time.Now() + _ = context.WithValue(ctx, startTimeKey, startTime) // Context not used further + + return notification, true +} + +// PostHook records processing completion and any errors. +func (mh *MetricsHook) PostHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) { + // Calculate processing duration + if startTime, ok := ctx.Value(startTimeKey).(time.Time); ok { + duration := time.Since(startTime) + mh.ProcessingTimes[notificationType] = duration + } + + // Record errors + if result != nil { + mh.ErrorCounts[notificationType]++ + + // Log error details with connection information + if conn, ok := notificationCtx.Conn.(*pool.Conn); ok { + internal.Logger.Printf(ctx, logs.MetricsHookRecordedError(notificationType, conn.GetID(), result)) + } + } +} + +// GetMetrics returns a summary of collected metrics. 
+func (mh *MetricsHook) GetMetrics() map[string]interface{} { + return map[string]interface{}{ + "notification_counts": mh.NotificationCounts, + "processing_times": mh.ProcessingTimes, + "error_counts": mh.ErrorCounts, + } +} + +// ExampleCircuitBreakerMonitor demonstrates how to monitor circuit breaker status +func ExampleCircuitBreakerMonitor(poolHook *PoolHook) { + // Get circuit breaker statistics + stats := poolHook.GetCircuitBreakerStats() + + for _, stat := range stats { + fmt.Printf("Circuit Breaker for %s:\n", stat.Endpoint) + fmt.Printf(" State: %s\n", stat.State) + fmt.Printf(" Failures: %d\n", stat.Failures) + fmt.Printf(" Last Failure: %v\n", stat.LastFailureTime) + fmt.Printf(" Last Success: %v\n", stat.LastSuccessTime) + + // Alert if circuit breaker is open + if stat.State.String() == "open" { + fmt.Printf(" ⚠️ ALERT: Circuit breaker is OPEN for %s\n", stat.Endpoint) + } + } +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go new file mode 100644 index 00000000..d66542ff --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/handoff_worker.go @@ -0,0 +1,525 @@ +package maintnotifications + +import ( + "context" + "errors" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/pool" +) + +// PoolNameMain is the name used for the main connection pool in metrics. 
+const PoolNameMain = "main" + +// handoffWorkerManager manages background workers and queue for connection handoffs +type handoffWorkerManager struct { + // Event-driven handoff support + handoffQueue chan HandoffRequest // Queue for handoff requests + shutdown chan struct{} // Shutdown signal + shutdownOnce sync.Once // Ensure clean shutdown + workerWg sync.WaitGroup // Track worker goroutines + + // On-demand worker management + maxWorkers int + activeWorkers atomic.Int32 + workerTimeout time.Duration // How long workers wait for work before exiting + workersScaling atomic.Bool + + // Simple state tracking + pending sync.Map // map[uint64]int64 (connID -> seqID) + + // Configuration for the maintenance notifications + config *Config + + // Pool hook reference for handoff processing + poolHook *PoolHook + + // Circuit breaker manager for endpoint failure handling + circuitBreakerManager *CircuitBreakerManager +} + +// newHandoffWorkerManager creates a new handoff worker manager +func newHandoffWorkerManager(config *Config, poolHook *PoolHook) *handoffWorkerManager { + return &handoffWorkerManager{ + handoffQueue: make(chan HandoffRequest, config.HandoffQueueSize), + shutdown: make(chan struct{}), + maxWorkers: config.MaxWorkers, + activeWorkers: atomic.Int32{}, // Start with no workers - create on demand + workerTimeout: 15 * time.Second, // Workers exit after 15s of inactivity + config: config, + poolHook: poolHook, + circuitBreakerManager: newCircuitBreakerManager(config), + } +} + +// getCurrentWorkers returns the current number of active workers (for testing) +func (hwm *handoffWorkerManager) getCurrentWorkers() int { + return int(hwm.activeWorkers.Load()) +} + +// getPendingMap returns the pending map for testing purposes +func (hwm *handoffWorkerManager) getPendingMap() *sync.Map { + return &hwm.pending +} + +// getMaxWorkers returns the max workers for testing purposes +func (hwm *handoffWorkerManager) getMaxWorkers() int { + return hwm.maxWorkers +} + +// 
getHandoffQueue returns the handoff queue for testing purposes +func (hwm *handoffWorkerManager) getHandoffQueue() chan HandoffRequest { + return hwm.handoffQueue +} + +// getCircuitBreakerStats returns circuit breaker statistics for monitoring +func (hwm *handoffWorkerManager) getCircuitBreakerStats() []CircuitBreakerStats { + return hwm.circuitBreakerManager.GetAllStats() +} + +// resetCircuitBreakers resets all circuit breakers (useful for testing) +func (hwm *handoffWorkerManager) resetCircuitBreakers() { + hwm.circuitBreakerManager.Reset() +} + +// isHandoffPending returns true if the given connection has a pending handoff +func (hwm *handoffWorkerManager) isHandoffPending(conn *pool.Conn) bool { + _, pending := hwm.pending.Load(conn.GetID()) + return pending +} + +// ensureWorkerAvailable ensures at least one worker is available to process requests +// Creates a new worker if needed and under the max limit +func (hwm *handoffWorkerManager) ensureWorkerAvailable() { + select { + case <-hwm.shutdown: + return + default: + if hwm.workersScaling.CompareAndSwap(false, true) { + defer hwm.workersScaling.Store(false) + // Check if we need a new worker + currentWorkers := hwm.activeWorkers.Load() + workersWas := currentWorkers + for currentWorkers < int32(hwm.maxWorkers) { + hwm.workerWg.Add(1) + go hwm.onDemandWorker() + currentWorkers++ + } + // workersWas is always <= currentWorkers + // currentWorkers will be maxWorkers, but if we have a worker that was closed + // while we were creating new workers, just add the difference between + // the currentWorkers and the number of workers we observed initially (i.e. 
the number of workers we created) + hwm.activeWorkers.Add(currentWorkers - workersWas) + } + } +} + +// onDemandWorker processes handoff requests and exits when idle +func (hwm *handoffWorkerManager) onDemandWorker() { + defer func() { + // Handle panics to ensure proper cleanup + if r := recover(); r != nil { + internal.Logger.Printf(context.Background(), logs.WorkerPanicRecovered(r)) + } + + // Decrement active worker count when exiting + hwm.activeWorkers.Add(-1) + hwm.workerWg.Done() + }() + + // Create reusable timer to prevent timer leaks + timer := time.NewTimer(hwm.workerTimeout) + defer timer.Stop() + + for { + // Reset timer for next iteration + if !timer.Stop() { + select { + case <-timer.C: + default: + } + } + timer.Reset(hwm.workerTimeout) + + select { + case <-hwm.shutdown: + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.WorkerExitingDueToShutdown()) + } + return + case <-timer.C: + // Worker has been idle for too long, exit to save resources + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.WorkerExitingDueToInactivityTimeout(hwm.workerTimeout)) + } + return + case request := <-hwm.handoffQueue: + // Check for shutdown before processing + select { + case <-hwm.shutdown: + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.WorkerExitingDueToShutdownWhileProcessing()) + } + // Clean up the request before exiting + hwm.pending.Delete(request.ConnID) + return + default: + // Process the request + hwm.processHandoffRequest(request) + } + } + } +} + +// processHandoffRequest processes a single handoff request +func (hwm *handoffWorkerManager) processHandoffRequest(request HandoffRequest) { + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.HandoffStarted(request.Conn.GetID(), request.Endpoint)) + } + + // Create a context with handoff timeout from config + handoffTimeout := 15 * time.Second // 
Default timeout + if hwm.config != nil && hwm.config.HandoffTimeout > 0 { + handoffTimeout = hwm.config.HandoffTimeout + } + ctx, cancel := context.WithTimeout(context.Background(), handoffTimeout) + defer cancel() + + // Create a context that also respects the shutdown signal + shutdownCtx, shutdownCancel := context.WithCancel(ctx) + defer shutdownCancel() + + // Monitor shutdown signal in a separate goroutine + go func() { + select { + case <-hwm.shutdown: + shutdownCancel() + case <-shutdownCtx.Done(): + } + }() + + // Perform the handoff with cancellable context + shouldRetry, err := hwm.performConnectionHandoff(shutdownCtx, request.Conn) + minRetryBackoff := 500 * time.Millisecond + if err != nil { + if shouldRetry { + now := time.Now() + deadline, ok := shutdownCtx.Deadline() + thirdOfTimeout := handoffTimeout / 3 + if !ok || deadline.Before(now) { + // wait half the timeout before retrying if no deadline or deadline has passed + deadline = now.Add(thirdOfTimeout) + } + afterTime := deadline.Sub(now) + if afterTime < minRetryBackoff { + afterTime = minRetryBackoff + } + + if internal.LogLevel.InfoOrAbove() { + // Get current retry count for better logging + currentRetries := request.Conn.HandoffRetries() + maxRetries := 3 // Default fallback + if hwm.config != nil { + maxRetries = hwm.config.MaxHandoffRetries + } + internal.Logger.Printf(context.Background(), logs.HandoffFailed(request.ConnID, request.Endpoint, currentRetries, maxRetries, err)) + } + // Schedule retry - keep connection in pending map until retry is queued + time.AfterFunc(afterTime, func() { + if err := hwm.queueHandoff(request.Conn); err != nil { + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(context.Background(), logs.CannotQueueHandoffForRetry(err)) + } + // Failed to queue retry - remove from pending and close connection + hwm.pending.Delete(request.Conn.GetID()) + hwm.closeConnFromRequest(context.Background(), request, err) + } else { + // Successfully queued retry - 
remove from pending (will be re-added by queueHandoff) + hwm.pending.Delete(request.Conn.GetID()) + } + }) + return + } else { + // Won't retry - remove from pending and close connection + hwm.pending.Delete(request.Conn.GetID()) + go hwm.closeConnFromRequest(ctx, request, err) + } + + // Clear handoff state if not returned for retry + seqID := request.Conn.GetMovingSeqID() + connID := request.Conn.GetID() + if hwm.poolHook.operationsManager != nil { + hwm.poolHook.operationsManager.UntrackOperationWithConnID(seqID, connID) + } + } else { + // Success - remove from pending map + hwm.pending.Delete(request.Conn.GetID()) + } +} + +// queueHandoff queues a handoff request for processing +// if err is returned, connection will be removed from pool +func (hwm *handoffWorkerManager) queueHandoff(conn *pool.Conn) error { + // Get handoff info atomically to prevent race conditions + shouldHandoff, endpoint, seqID := conn.GetHandoffInfo() + + // on retries the connection will not be marked for handoff, but it will have retries > 0 + // if shouldHandoff is false and retries is 0, then we are not retrying and not do a handoff + if !shouldHandoff && conn.HandoffRetries() == 0 { + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.ConnectionNotMarkedForHandoff(conn.GetID())) + } + return errors.New(logs.ConnectionNotMarkedForHandoffError(conn.GetID())) + } + + // Create handoff request with atomically retrieved data + request := HandoffRequest{ + Conn: conn, + ConnID: conn.GetID(), + Endpoint: endpoint, + SeqID: seqID, + Pool: hwm.poolHook.pool, // Include pool for connection removal on failure + } + + select { + // priority to shutdown + case <-hwm.shutdown: + return ErrShutdown + default: + select { + case <-hwm.shutdown: + return ErrShutdown + case hwm.handoffQueue <- request: + // Store in pending map + hwm.pending.Store(request.ConnID, request.SeqID) + // Ensure we have a worker to process this request + hwm.ensureWorkerAvailable() + 
return nil + default: + select { + case <-hwm.shutdown: + return ErrShutdown + case hwm.handoffQueue <- request: + // Store in pending map + hwm.pending.Store(request.ConnID, request.SeqID) + // Ensure we have a worker to process this request + hwm.ensureWorkerAvailable() + return nil + case <-time.After(100 * time.Millisecond): // give workers a chance to process + // Queue is full - log and attempt scaling + queueLen := len(hwm.handoffQueue) + queueCap := cap(hwm.handoffQueue) + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(context.Background(), logs.HandoffQueueFull(queueLen, queueCap)) + } + } + } + } + + // Ensure we have workers available to handle the load + hwm.ensureWorkerAvailable() + return ErrHandoffQueueFull +} + +// shutdownWorkers gracefully shuts down the worker manager, waiting for workers to complete +func (hwm *handoffWorkerManager) shutdownWorkers(ctx context.Context) error { + hwm.shutdownOnce.Do(func() { + close(hwm.shutdown) + // workers will exit when they finish their current request + + // Shutdown circuit breaker manager cleanup goroutine + if hwm.circuitBreakerManager != nil { + hwm.circuitBreakerManager.Shutdown() + } + }) + + // Wait for workers to complete + done := make(chan struct{}) + go func() { + hwm.workerWg.Wait() + close(done) + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// performConnectionHandoff performs the actual connection handoff +// When error is returned, the connection handoff should be retried if err is not ErrMaxHandoffRetriesReached +func (hwm *handoffWorkerManager) performConnectionHandoff(ctx context.Context, conn *pool.Conn) (shouldRetry bool, err error) { + // Clear handoff state after successful handoff + connID := conn.GetID() + + newEndpoint := conn.GetHandoffEndpoint() + if newEndpoint == "" { + return false, ErrConnectionInvalidHandoffState + } + + // Use circuit breaker to protect against failing endpoints + circuitBreaker := 
hwm.circuitBreakerManager.GetCircuitBreaker(newEndpoint) + + // Check if circuit breaker is open before attempting handoff + if circuitBreaker.IsOpen() { + internal.Logger.Printf(ctx, logs.CircuitBreakerOpen(connID, newEndpoint)) + return false, ErrCircuitBreakerOpen // Don't retry when circuit breaker is open + } + + // Perform the handoff + shouldRetry, err = hwm.performHandoffInternal(ctx, conn, newEndpoint, connID) + + // Update circuit breaker based on result + if err != nil { + // Only track dial/network errors in circuit breaker, not initialization errors + if shouldRetry { + circuitBreaker.recordFailure() + } + return shouldRetry, err + } + + // Success - record in circuit breaker + circuitBreaker.recordSuccess() + return false, nil +} + +// performHandoffInternal performs the actual handoff logic (extracted for circuit breaker integration) +func (hwm *handoffWorkerManager) performHandoffInternal( + ctx context.Context, + conn *pool.Conn, + newEndpoint string, + connID uint64, +) (shouldRetry bool, err error) { + retries := conn.IncrementAndGetHandoffRetries(1) + internal.Logger.Printf(ctx, logs.HandoffRetryAttempt(connID, retries, newEndpoint, conn.RemoteAddr().String())) + maxRetries := 3 // Default fallback + if hwm.config != nil { + maxRetries = hwm.config.MaxHandoffRetries + } + + if retries > maxRetries { + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(ctx, logs.ReachedMaxHandoffRetries(connID, newEndpoint, maxRetries)) + } + // won't retry on ErrMaxHandoffRetriesReached + return false, ErrMaxHandoffRetriesReached + } + + // Create endpoint-specific dialer + endpointDialer := hwm.createEndpointDialer(newEndpoint) + + // Create new connection to the new endpoint + newNetConn, err := endpointDialer(ctx) + if err != nil { + internal.Logger.Printf(ctx, logs.FailedToDialNewEndpoint(connID, newEndpoint, err)) + // will retry + // Maybe a network error - retry after a delay + return true, err + } + + // Get the old connection + oldConn := 
conn.GetNetConn() + + // Apply relaxed timeout to the new connection for the configured post-handoff duration + // This gives the new connection more time to handle operations during cluster transition + // Setting this here (before initing the connection) ensures that the connection is going + // to use the relaxed timeout for the first operation (auth/ACL select) + if hwm.config != nil && hwm.config.PostHandoffRelaxedDuration > 0 { + relaxedTimeout := hwm.config.RelaxedTimeout + // Set relaxed timeout with deadline - no background goroutine needed + deadline := time.Now().Add(hwm.config.PostHandoffRelaxedDuration) + conn.SetRelaxedTimeoutWithDeadline(relaxedTimeout, relaxedTimeout, deadline) + + // Record relaxed timeout metric (post-handoff) + if relaxedTimeoutCallback := pool.GetMetricConnectionRelaxedTimeoutCallback(); relaxedTimeoutCallback != nil { + relaxedTimeoutCallback(ctx, 1, conn, PoolNameMain, "HANDOFF") + } + + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(context.Background(), logs.ApplyingRelaxedTimeoutDueToPostHandoff(connID, relaxedTimeout, deadline.Format("15:04:05.000"))) + } + } + + // Replace the connection and execute initialization + err = conn.SetNetConnAndInitConn(ctx, newNetConn) + if err != nil { + // won't retry + // Initialization failed - remove the connection + return false, err + } + defer func() { + if oldConn != nil { + oldConn.Close() + } + }() + + // Clear handoff state will: + // - set the connection as usable again + // - clear the handoff state (shouldHandoff, endpoint, seqID) + // - reset the handoff retries to 0 + // Note: Theoretically there may be a short window where the connection is in the pool + // and IDLE (initConn completed) but still has handoff state set. 
+ conn.ClearHandoffState() + internal.Logger.Printf(ctx, logs.HandoffSucceeded(connID, newEndpoint)) + + // successfully completed the handoff, no retry needed and no error + // Notify metrics: connection handoff succeeded + if handoffCallback := pool.GetMetricConnectionHandoffCallback(); handoffCallback != nil { + handoffCallback(ctx, conn, PoolNameMain) + } + + return false, nil +} + +// createEndpointDialer creates a dialer function that connects to a specific endpoint +func (hwm *handoffWorkerManager) createEndpointDialer(endpoint string) func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + // Parse endpoint to extract host and port + host, port, err := net.SplitHostPort(endpoint) + if err != nil { + // If no port specified, assume default Redis port + host = endpoint + if port == "" { + port = "6379" + } + } + + // Use the base dialer to connect to the new endpoint + return hwm.poolHook.baseDialer(ctx, hwm.poolHook.network, net.JoinHostPort(host, port)) + } +} + +// closeConnFromRequest closes the connection and logs the reason +func (hwm *handoffWorkerManager) closeConnFromRequest(ctx context.Context, request HandoffRequest, err error) { + pooler := request.Pool + conn := request.Conn + + // Clear handoff state before closing + conn.ClearHandoffState() + + if pooler != nil { + // Use RemoveWithoutTurn instead of Remove to avoid freeing a turn that we don't have. + // The handoff worker doesn't call Get(), so it doesn't have a turn to free. + // Remove() is meant to be called after Get() and frees a turn. + // RemoveWithoutTurn() removes and closes the connection without affecting the queue. 
+ pooler.RemoveWithoutTurn(ctx, conn, err) + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(ctx, logs.RemovingConnectionFromPool(conn.GetID(), err)) + } + } else { + errClose := conn.Close() // Close the connection if no pool provided + if errClose != nil { + internal.Logger.Printf(ctx, "redis: failed to close connection: %v", errClose) + } + if internal.LogLevel.WarnOrAbove() { + internal.Logger.Printf(ctx, logs.NoPoolProvidedCannotRemove(conn.GetID(), err)) + } + } +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go new file mode 100644 index 00000000..ee3c3819 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/hooks.go @@ -0,0 +1,60 @@ +package maintnotifications + +import ( + "context" + "slices" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/push" +) + +// LoggingHook is an example hook implementation that logs all notifications. +type LoggingHook struct { + LogLevel int // 0=Error, 1=Warn, 2=Info, 3=Debug +} + +// PreHook logs the notification before processing and allows modification. 
+func (lh *LoggingHook) PreHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) { + if lh.LogLevel >= 2 { // Info level + // Log the notification type and content + connID := uint64(0) + if conn, ok := notificationCtx.Conn.(*pool.Conn); ok { + connID = conn.GetID() + } + seqID := int64(0) + if slices.Contains(maintenanceNotificationTypes, notificationType) { + // seqID is the second element in the notification array + if len(notification) > 1 { + if parsedSeqID, ok := notification[1].(int64); !ok { + seqID = 0 + } else { + seqID = parsedSeqID + } + } + + } + internal.Logger.Printf(ctx, logs.ProcessingNotification(connID, seqID, notificationType, notification)) + } + return notification, true // Continue processing with unmodified notification +} + +// PostHook logs the result after processing. +func (lh *LoggingHook) PostHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) { + connID := uint64(0) + if conn, ok := notificationCtx.Conn.(*pool.Conn); ok { + connID = conn.GetID() + } + if result != nil && lh.LogLevel >= 1 { // Warning level + internal.Logger.Printf(ctx, logs.ProcessingNotificationFailed(connID, notificationType, result, notification)) + } else if lh.LogLevel >= 3 { // Debug level + internal.Logger.Printf(ctx, logs.ProcessingNotificationSucceeded(connID, notificationType)) + } +} + +// NewLoggingHook creates a new logging hook with the specified log level. 
+// Log levels: 0=Error, 1=Warn, 2=Info, 3=Debug +func NewLoggingHook(logLevel int) *LoggingHook { + return &LoggingHook{LogLevel: logLevel} +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go new file mode 100644 index 00000000..3f9478e1 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/manager.go @@ -0,0 +1,362 @@ +package maintnotifications + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/interfaces" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/push" +) + +// Push notification type constants for maintenance +const ( + NotificationMoving = "MOVING" // Per-connection handoff notification + NotificationMigrating = "MIGRATING" // Per-connection migration start notification - relaxes timeouts + NotificationMigrated = "MIGRATED" // Per-connection migration complete notification - clears relaxed timeouts + NotificationFailingOver = "FAILING_OVER" // Per-connection failover start notification - relaxes timeouts + NotificationFailedOver = "FAILED_OVER" // Per-connection failover complete notification - clears relaxed timeouts + NotificationSMigrating = "SMIGRATING" // Cluster slot migrating notification - relaxes timeouts + NotificationSMigrated = "SMIGRATED" // Cluster slot migrated notification - unrelaxes timeouts and triggers cluster state reload +) + +// maintenanceNotificationTypes contains all notification types that maintenance handles +var maintenanceNotificationTypes = []string{ + NotificationMoving, + NotificationMigrating, + NotificationMigrated, + NotificationFailingOver, + NotificationFailedOver, + NotificationSMigrating, + NotificationSMigrated, +} + +// NotificationHook is called before and after notification 
processing +// PreHook can modify the notification and return false to skip processing +// PostHook is called after successful processing +type NotificationHook interface { + PreHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) + PostHook(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) +} + +// MovingOperationKey provides a unique key for tracking MOVING operations +// that combines sequence ID with connection identifier to handle duplicate +// sequence IDs across multiple connections to the same node. +type MovingOperationKey struct { + SeqID int64 // Sequence ID from MOVING notification + ConnID uint64 // Unique connection identifier +} + +// String returns a string representation of the key for debugging +func (k MovingOperationKey) String() string { + return fmt.Sprintf("seq:%d-conn:%d", k.SeqID, k.ConnID) +} + +// Manager provides a simplified upgrade functionality with hooks and atomic state. 
+type Manager struct { + client interfaces.ClientInterface + config *Config + options interfaces.OptionsInterface + pool pool.Pooler + + // MOVING operation tracking - using sync.Map for better concurrent performance + activeMovingOps sync.Map // map[MovingOperationKey]*MovingOperation + + // SMIGRATED notification deduplication - tracks processed SeqIDs + // Multiple connections may receive the same SMIGRATED notification + processedSMigratedSeqIDs sync.Map // map[int64]bool + + // Atomic state tracking - no locks needed for state queries + activeOperationCount atomic.Int64 // Number of active operations + closed atomic.Bool // Manager closed state + + // Notification hooks for extensibility + hooks []NotificationHook + hooksMu sync.RWMutex // Protects hooks slice + poolHooksRef *PoolHook + + // Cluster state reload callback for SMIGRATED notifications + clusterStateReloadCallback ClusterStateReloadCallback +} + +// MovingOperation tracks an active MOVING operation. +type MovingOperation struct { + SeqID int64 + NewEndpoint string + StartTime time.Time + Deadline time.Time +} + +// ClusterStateReloadCallback is a callback function that triggers cluster state reload. +// This is used by node clients to notify their parent ClusterClient about SMIGRATED notifications. +// The hostPort parameter indicates the destination node (e.g., "127.0.0.1:6379"). +// The slotRanges parameter contains the migrated slots (e.g., ["1234", "5000-6000"]). +// Currently, implementations typically reload the entire cluster state, but in the future +// this could be optimized to reload only the specific slots. +type ClusterStateReloadCallback func(ctx context.Context, hostPort string, slotRanges []string) + +// NewManager creates a new simplified manager. 
+func NewManager(client interfaces.ClientInterface, pool pool.Pooler, config *Config) (*Manager, error) { + if client == nil { + return nil, ErrInvalidClient + } + + hm := &Manager{ + client: client, + pool: pool, + options: client.GetOptions(), + config: config.Clone(), + hooks: make([]NotificationHook, 0), + } + + // Set up push notification handling + if err := hm.setupPushNotifications(); err != nil { + return nil, err + } + + return hm, nil +} + +// GetPoolHook creates a pool hook with a custom dialer. +func (hm *Manager) InitPoolHook(baseDialer func(context.Context, string, string) (net.Conn, error)) { + poolHook := hm.createPoolHook(baseDialer) + hm.pool.AddPoolHook(poolHook) +} + +// setupPushNotifications sets up push notification handling by registering with the client's processor. +func (hm *Manager) setupPushNotifications() error { + processor := hm.client.GetPushProcessor() + if processor == nil { + return ErrInvalidClient // Client doesn't support push notifications + } + + // Create our notification handler + handler := &NotificationHandler{manager: hm, operationsManager: hm} + + // Register handlers for all upgrade notifications with the client's processor + for _, notificationType := range maintenanceNotificationTypes { + if err := processor.RegisterHandler(notificationType, handler, true); err != nil { + return errors.New(logs.FailedToRegisterHandler(notificationType, err)) + } + } + + return nil +} + +// TrackMovingOperationWithConnID starts a new MOVING operation with a specific connection ID. 
+func (hm *Manager) TrackMovingOperationWithConnID(ctx context.Context, newEndpoint string, deadline time.Time, seqID int64, connID uint64) error { + // Create composite key + key := MovingOperationKey{ + SeqID: seqID, + ConnID: connID, + } + + // Create MOVING operation record + movingOp := &MovingOperation{ + SeqID: seqID, + NewEndpoint: newEndpoint, + StartTime: time.Now(), + Deadline: deadline, + } + + // Use LoadOrStore for atomic check-and-set operation + if _, loaded := hm.activeMovingOps.LoadOrStore(key, movingOp); loaded { + // Duplicate MOVING notification, ignore + if internal.LogLevel.DebugOrAbove() { // Debug level + internal.Logger.Printf(context.Background(), logs.DuplicateMovingOperation(connID, newEndpoint, seqID)) + } + return nil + } + if internal.LogLevel.DebugOrAbove() { // Debug level + internal.Logger.Printf(context.Background(), logs.TrackingMovingOperation(connID, newEndpoint, seqID)) + } + + // Increment active operation count atomically + hm.activeOperationCount.Add(1) + + return nil +} + +// UntrackOperationWithConnID completes a MOVING operation with a specific connection ID. +func (hm *Manager) UntrackOperationWithConnID(seqID int64, connID uint64) { + // Create composite key + key := MovingOperationKey{ + SeqID: seqID, + ConnID: connID, + } + + // Remove from active operations atomically + if _, loaded := hm.activeMovingOps.LoadAndDelete(key); loaded { + if internal.LogLevel.DebugOrAbove() { // Debug level + internal.Logger.Printf(context.Background(), logs.UntrackingMovingOperation(connID, seqID)) + } + // Decrement active operation count only if operation existed + hm.activeOperationCount.Add(-1) + } else { + if internal.LogLevel.DebugOrAbove() { // Debug level + internal.Logger.Printf(context.Background(), logs.OperationNotTracked(connID, seqID)) + } + } +} + +// GetActiveMovingOperations returns active operations with composite keys. +// WARNING: This method creates a new map and copies all operations on every call. 
+// Use sparingly, especially in hot paths or high-frequency logging. +func (hm *Manager) GetActiveMovingOperations() map[MovingOperationKey]*MovingOperation { + result := make(map[MovingOperationKey]*MovingOperation) + + // Iterate over sync.Map to build result + hm.activeMovingOps.Range(func(key, value interface{}) bool { + k := key.(MovingOperationKey) + op := value.(*MovingOperation) + + // Create a copy to avoid sharing references + result[k] = &MovingOperation{ + SeqID: op.SeqID, + NewEndpoint: op.NewEndpoint, + StartTime: op.StartTime, + Deadline: op.Deadline, + } + return true // Continue iteration + }) + + return result +} + +// IsHandoffInProgress returns true if any handoff is in progress. +// Uses atomic counter for lock-free operation. +func (hm *Manager) IsHandoffInProgress() bool { + return hm.activeOperationCount.Load() > 0 +} + +// GetActiveOperationCount returns the number of active operations. +// Uses atomic counter for lock-free operation. +func (hm *Manager) GetActiveOperationCount() int64 { + return hm.activeOperationCount.Load() +} + +// MarkSMigratedSeqIDProcessed attempts to mark a SMIGRATED SeqID as processed. +// Returns true if this is the first time processing this SeqID (should process), +// false if it was already processed (should skip). +// This prevents duplicate processing when multiple connections receive the same notification. +func (hm *Manager) MarkSMigratedSeqIDProcessed(seqID int64) bool { + _, alreadyProcessed := hm.processedSMigratedSeqIDs.LoadOrStore(seqID, true) + return !alreadyProcessed // Return true if NOT already processed +} + +// Close closes the manager. 
+func (hm *Manager) Close() error { + // Use atomic operation for thread-safe close check + if !hm.closed.CompareAndSwap(false, true) { + return nil // Already closed + } + + // Shutdown the pool hook if it exists + if hm.poolHooksRef != nil { + // Use a timeout to prevent hanging indefinitely + shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + err := hm.poolHooksRef.Shutdown(shutdownCtx) + if err != nil { + // was not able to close pool hook, keep closed state false + hm.closed.Store(false) + return err + } + // Remove the pool hook from the pool + if hm.pool != nil { + hm.pool.RemovePoolHook(hm.poolHooksRef) + } + } + + // Clear all active operations + hm.activeMovingOps.Range(func(key, value interface{}) bool { + hm.activeMovingOps.Delete(key) + return true + }) + + // Reset counter + hm.activeOperationCount.Store(0) + + return nil +} + +// GetState returns current state using atomic counter for lock-free operation. +func (hm *Manager) GetState() State { + if hm.activeOperationCount.Load() > 0 { + return StateMoving + } + return StateIdle +} + +// processPreHooks calls all pre-hooks and returns the modified notification and whether to continue processing. +func (hm *Manager) processPreHooks(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}) ([]interface{}, bool) { + hm.hooksMu.RLock() + defer hm.hooksMu.RUnlock() + + currentNotification := notification + + for _, hook := range hm.hooks { + modifiedNotification, shouldContinue := hook.PreHook(ctx, notificationCtx, notificationType, currentNotification) + if !shouldContinue { + return modifiedNotification, false + } + currentNotification = modifiedNotification + } + + return currentNotification, true +} + +// processPostHooks calls all post-hooks with the processing result. 
+func (hm *Manager) processPostHooks(ctx context.Context, notificationCtx push.NotificationHandlerContext, notificationType string, notification []interface{}, result error) { + hm.hooksMu.RLock() + defer hm.hooksMu.RUnlock() + + for _, hook := range hm.hooks { + hook.PostHook(ctx, notificationCtx, notificationType, notification, result) + } +} + +// createPoolHook creates a pool hook with this manager already set. +func (hm *Manager) createPoolHook(baseDialer func(context.Context, string, string) (net.Conn, error)) *PoolHook { + if hm.poolHooksRef != nil { + return hm.poolHooksRef + } + // Get pool size from client options for better worker defaults + poolSize := 0 + if hm.options != nil { + poolSize = hm.options.GetPoolSize() + } + + hm.poolHooksRef = NewPoolHookWithPoolSize(baseDialer, hm.options.GetNetwork(), hm.config, hm, poolSize) + hm.poolHooksRef.SetPool(hm.pool) + + return hm.poolHooksRef +} + +func (hm *Manager) AddNotificationHook(notificationHook NotificationHook) { + hm.hooksMu.Lock() + defer hm.hooksMu.Unlock() + hm.hooks = append(hm.hooks, notificationHook) +} + +// SetClusterStateReloadCallback sets the callback function that will be called when a SMIGRATED notification is received. +// This allows node clients to notify their parent ClusterClient to reload cluster state. +func (hm *Manager) SetClusterStateReloadCallback(callback ClusterStateReloadCallback) { + hm.clusterStateReloadCallback = callback +} + +// TriggerClusterStateReload calls the cluster state reload callback if it's set. +// This is called when a SMIGRATED notification is received. 
+func (hm *Manager) TriggerClusterStateReload(ctx context.Context, hostPort string, slotRanges []string) { + if hm.clusterStateReloadCallback != nil { + hm.clusterStateReloadCallback(ctx, hostPort, slotRanges) + } +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go new file mode 100644 index 00000000..9ea0558b --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/pool_hook.go @@ -0,0 +1,182 @@ +package maintnotifications + +import ( + "context" + "net" + "sync" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/pool" +) + +// OperationsManagerInterface defines the interface for completing handoff operations +type OperationsManagerInterface interface { + TrackMovingOperationWithConnID(ctx context.Context, newEndpoint string, deadline time.Time, seqID int64, connID uint64) error + UntrackOperationWithConnID(seqID int64, connID uint64) +} + +// HandoffRequest represents a request to handoff a connection to a new endpoint +type HandoffRequest struct { + Conn *pool.Conn + ConnID uint64 // Unique connection identifier + Endpoint string + SeqID int64 + Pool pool.Pooler // Pool to remove connection from on failure +} + +// PoolHook implements pool.PoolHook for Redis-specific connection handling +// with maintenance notifications support. 
+type PoolHook struct { + // Base dialer for creating connections to new endpoints during handoffs + // args are network and address + baseDialer func(context.Context, string, string) (net.Conn, error) + + // Network type (e.g., "tcp", "unix") + network string + + // Worker manager for background handoff processing + workerManager *handoffWorkerManager + + // Configuration for the maintenance notifications + config *Config + + // Operations manager interface for operation completion tracking + operationsManager OperationsManagerInterface + + // Pool interface for removing connections on handoff failure + pool pool.Pooler +} + +// NewPoolHook creates a new pool hook +func NewPoolHook(baseDialer func(context.Context, string, string) (net.Conn, error), network string, config *Config, operationsManager OperationsManagerInterface) *PoolHook { + return NewPoolHookWithPoolSize(baseDialer, network, config, operationsManager, 0) +} + +// NewPoolHookWithPoolSize creates a new pool hook with pool size for better worker defaults +func NewPoolHookWithPoolSize(baseDialer func(context.Context, string, string) (net.Conn, error), network string, config *Config, operationsManager OperationsManagerInterface, poolSize int) *PoolHook { + // Apply defaults if config is nil or has zero values + if config == nil { + config = config.ApplyDefaultsWithPoolSize(poolSize) + } + + ph := &PoolHook{ + // baseDialer is used to create connections to new endpoints during handoffs + baseDialer: baseDialer, + network: network, + config: config, + operationsManager: operationsManager, + } + + // Create worker manager + ph.workerManager = newHandoffWorkerManager(config, ph) + + return ph +} + +// SetPool sets the pool interface for removing connections on handoff failure +func (ph *PoolHook) SetPool(pooler pool.Pooler) { + ph.pool = pooler +} + +// GetCurrentWorkers returns the current number of active workers (for testing) +func (ph *PoolHook) GetCurrentWorkers() int { + return 
ph.workerManager.getCurrentWorkers() +} + +// IsHandoffPending returns true if the given connection has a pending handoff +func (ph *PoolHook) IsHandoffPending(conn *pool.Conn) bool { + return ph.workerManager.isHandoffPending(conn) +} + +// GetPendingMap returns the pending map for testing purposes +func (ph *PoolHook) GetPendingMap() *sync.Map { + return ph.workerManager.getPendingMap() +} + +// GetMaxWorkers returns the max workers for testing purposes +func (ph *PoolHook) GetMaxWorkers() int { + return ph.workerManager.getMaxWorkers() +} + +// GetHandoffQueue returns the handoff queue for testing purposes +func (ph *PoolHook) GetHandoffQueue() chan HandoffRequest { + return ph.workerManager.getHandoffQueue() +} + +// GetCircuitBreakerStats returns circuit breaker statistics for monitoring +func (ph *PoolHook) GetCircuitBreakerStats() []CircuitBreakerStats { + return ph.workerManager.getCircuitBreakerStats() +} + +// ResetCircuitBreakers resets all circuit breakers (useful for testing) +func (ph *PoolHook) ResetCircuitBreakers() { + ph.workerManager.resetCircuitBreakers() +} + +// OnGet is called when a connection is retrieved from the pool +func (ph *PoolHook) OnGet(_ context.Context, conn *pool.Conn, _ bool) (accept bool, err error) { + // Check if connection is marked for handoff + // This prevents using connections that have received MOVING notifications + if conn.ShouldHandoff() { + return false, ErrConnectionMarkedForHandoffWithState + } + + // Check if connection is usable (not in UNUSABLE or CLOSED state) + // This ensures we don't return connections that are currently being handed off or re-authenticated. 
+ if !conn.IsUsable() { + return false, ErrConnectionMarkedForHandoff + } + + return true, nil +} + +// OnPut is called when a connection is returned to the pool +func (ph *PoolHook) OnPut(ctx context.Context, conn *pool.Conn) (shouldPool bool, shouldRemove bool, err error) { + // first check if we should handoff for faster rejection + if !conn.ShouldHandoff() { + // Default behavior (no handoff): pool the connection + return true, false, nil + } + + // check pending handoff to not queue the same connection twice + if ph.workerManager.isHandoffPending(conn) { + // Default behavior (pending handoff): pool the connection + return true, false, nil + } + + if err := ph.workerManager.queueHandoff(conn); err != nil { + // Failed to queue handoff, remove the connection + internal.Logger.Printf(ctx, logs.FailedToQueueHandoff(conn.GetID(), err)) + // Don't pool, remove connection, no error to caller + return false, true, nil + } + + // Check if handoff was already processed by a worker before we can mark it as queued + if !conn.ShouldHandoff() { + // Handoff was already processed - this is normal and the connection should be pooled + return true, false, nil + } + + if err := conn.MarkQueuedForHandoff(); err != nil { + // If marking fails, check if handoff was processed in the meantime + if !conn.ShouldHandoff() { + // Handoff was processed - this is normal, pool the connection + return true, false, nil + } + // Other error - remove the connection + return false, true, nil + } + internal.Logger.Printf(ctx, logs.MarkedForHandoff(conn.GetID())) + return true, false, nil +} + +func (ph *PoolHook) OnRemove(_ context.Context, _ *pool.Conn, _ error) { + // Not used +} + +// Shutdown gracefully shuts down the processor, waiting for workers to complete +func (ph *PoolHook) Shutdown(ctx context.Context) error { + return ph.workerManager.shutdownWorkers(ctx) +} diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go 
b/vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go new file mode 100644 index 00000000..7108265b --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/push_notification_handler.go @@ -0,0 +1,524 @@ +package maintnotifications + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/maintnotifications/logs" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/push" +) + +// NotificationHandler handles push notifications for the simplified manager. +type NotificationHandler struct { + manager *Manager + operationsManager OperationsManagerInterface +} + +// HandlePushNotification processes push notifications with hook support. +func (snh *NotificationHandler) HandlePushNotification(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error { + if len(notification) == 0 { + internal.Logger.Printf(ctx, logs.InvalidNotificationFormat(notification)) + return ErrInvalidNotification + } + + notificationType, ok := notification[0].(string) + if !ok { + internal.Logger.Printf(ctx, logs.InvalidNotificationTypeFormat(notification[0])) + return ErrInvalidNotification + } + + // Process pre-hooks - they can modify the notification or skip processing + modifiedNotification, shouldContinue := snh.manager.processPreHooks(ctx, handlerCtx, notificationType, notification) + if !shouldContinue { + return nil // Hooks decided to skip processing + } + + var err error + switch notificationType { + case NotificationMoving: + err = snh.handleMoving(ctx, handlerCtx, modifiedNotification) + case NotificationMigrating: + err = snh.handleMigrating(ctx, handlerCtx, modifiedNotification) + case NotificationMigrated: + err = snh.handleMigrated(ctx, handlerCtx, modifiedNotification) + case NotificationFailingOver: + err = snh.handleFailingOver(ctx, handlerCtx, modifiedNotification) + case 
NotificationFailedOver:
		err = snh.handleFailedOver(ctx, handlerCtx, modifiedNotification)
	case NotificationSMigrating:
		err = snh.handleSMigrating(ctx, handlerCtx, modifiedNotification)
	case NotificationSMigrated:
		err = snh.handleSMigrated(ctx, handlerCtx, modifiedNotification)
	default:
		// Ignore other notification types (e.g., pub/sub messages)
		err = nil
	}

	// Record maintenance notification metric
	if maintenanceCallback := pool.GetMetricMaintenanceNotificationCallback(); maintenanceCallback != nil {
		if conn, ok := handlerCtx.Conn.(*pool.Conn); ok {
			maintenanceCallback(ctx, conn, notificationType)
		}
	}

	// Process post-hooks with the result (hooks run regardless of handler error)
	snh.manager.processPostHooks(ctx, handlerCtx, notificationType, modifiedNotification, err)

	return err
}

// handleMoving processes MOVING notifications.
// MOVING indicates that a connection should be handed off to a new endpoint.
// This is a per-connection notification that triggers connection handoff.
// Expected format: ["MOVING", seqNum, timeS, endpoint]
func (snh *NotificationHandler) handleMoving(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	if len(notification) < 3 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("MOVING", notification))
		return ErrInvalidNotification
	}
	// Extract the sequence ID (position 1); it correlates this MOVING
	// operation when tracked via markConnForHandoff below.
	seqID, ok := notification[1].(int64)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidSeqIDInMovingNotification(notification[1]))
		return ErrInvalidNotification
	}

	// Extract timeS (position 2): the handoff window in seconds.
	timeS, ok := notification[2].(int64)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidTimeSInMovingNotification(notification[2]))
		return ErrInvalidNotification
	}

	newEndpoint := ""
	if len(notification) > 3 {
		// Extract new endpoint (position 3)
		newEndpoint, ok = notification[3].(string)
		if !ok {
			stringified := fmt.Sprintf("%v", notification[3])
			// a nil / RESP Null endpoint is valid and means "hand off to the current endpoint"
			if notification[3] == nil || stringified == internal.RedisNull {
				newEndpoint = ""
			} else {
				internal.Logger.Printf(ctx, logs.InvalidNewEndpointInMovingNotification(notification[3]))
				return ErrInvalidNotification
			}
		}
	}

	// Get the connection that received this notification
	conn := handlerCtx.Conn
	if conn == nil {
		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("MOVING"))
		return ErrInvalidNotification
	}

	// Type assert to get the underlying pool connection
	var poolConn *pool.Conn
	if pc, ok := conn.(*pool.Conn); ok {
		poolConn = pc
	} else {
		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("MOVING", conn, handlerCtx))
		return ErrInvalidNotification
	}

	// If the connection is closed or not pooled, we can ignore the notification
	// this connection won't be remembered by the pool and will be garbage collected
	// Keep pubsub connections around since they are not pooled but are long-lived
	// and should be allowed to handoff (the pubsub instance will reconnect and change
	// the underlying *pool.Conn)
	if (poolConn.IsClosed() || !poolConn.IsPooled()) && !poolConn.IsPubSub() {
		return nil
	}

	deadline := time.Now().Add(time.Duration(timeS) * time.Second)
	// If newEndpoint is empty, we should schedule a handoff to the current endpoint in timeS/2 seconds
	if newEndpoint == "" || newEndpoint == internal.RedisNull {
		if internal.LogLevel.DebugOrAbove() {
			internal.Logger.Printf(ctx, logs.SchedulingHandoffToCurrentEndpoint(poolConn.GetID(), float64(timeS)/2))
		}
		// same as current endpoint
		newEndpoint = snh.manager.options.GetAddr()
		// delay the handoff for timeS/2 seconds to the same endpoint
		// do this in a goroutine to avoid blocking the notification handler
		// NOTE: This timer is started while parsing the notification, so the connection is not marked for handoff
		// and there should be no possibility of a race condition or double handoff.
		time.AfterFunc(time.Duration(timeS/2)*time.Second, func() {
			if poolConn == nil || poolConn.IsClosed() {
				return
			}
			if err := snh.markConnForHandoff(poolConn, newEndpoint, seqID, deadline); err != nil {
				// Log error but don't fail the goroutine - use background context since original may be cancelled
				internal.Logger.Printf(context.Background(), logs.FailedToMarkForHandoff(poolConn.GetID(), err))
				return
			}

			// Queue the handoff immediately if the connection is idle in the pool.
			// If the connection is in use (StateInUse), it will be queued when returned to the pool via OnPut.
			// This handles the case where the connection is idle and might never be retrieved again.
			if poolConn.GetStateMachine().GetState() == pool.StateIdle {
				if snh.manager.poolHooksRef != nil && snh.manager.poolHooksRef.workerManager != nil {
					if err := snh.manager.poolHooksRef.workerManager.queueHandoff(poolConn); err != nil {
						internal.Logger.Printf(context.Background(), logs.FailedToQueueHandoff(poolConn.GetID(), err))
					} else {
						// Mark the connection as queued for handoff to prevent it from being retrieved
						// This transitions the connection to StateUnusable
						if err := poolConn.MarkQueuedForHandoff(); err != nil {
							internal.Logger.Printf(context.Background(), logs.FailedToMarkForHandoff(poolConn.GetID(), err))
						} else {
							internal.Logger.Printf(context.Background(), logs.MarkedForHandoff(poolConn.GetID()))
						}
					}
				}
			}
			// If connection is StateInUse, the handoff will be queued when it's returned to the pool
		})
		return nil
	}

	return snh.markConnForHandoff(poolConn, newEndpoint, seqID, deadline)
}

// markConnForHandoff marks conn for handoff to newEndpoint and, when an
// operations manager is configured, records the MOVING operation (seqID,
// deadline) against the connection ID. A connection that is already marked
// for handoff is tolerated (returns nil); a missing operations manager is
// reported as an error.
func (snh *NotificationHandler) markConnForHandoff(conn *pool.Conn, newEndpoint string, seqID int64, deadline time.Time) error {
	if err := conn.MarkForHandoff(newEndpoint, seqID); err != nil {
		internal.Logger.Printf(context.Background(), logs.FailedToMarkForHandoff(conn.GetID(), err))
		// Connection is already marked for handoff, which is acceptable
		// This can happen if multiple MOVING notifications are received for the same connection
		return nil
	}
	// Optionally track the MOVING operation in the operations manager
	if snh.operationsManager != nil {
		connID := conn.GetID()
		// Track the operation (ignore errors since this is optional)
		_ = snh.operationsManager.TrackMovingOperationWithConnID(context.Background(), newEndpoint, deadline, seqID, connID)
	} else {
		return errors.New(logs.ManagerNotInitialized())
	}
	return nil
}

// handleMigrating processes MIGRATING notifications.
// MIGRATING indicates that a connection migration is starting.
// This is a per-connection notification that applies relaxed timeouts.
// Expected format: ["MIGRATING", ...]
func (snh *NotificationHandler) handleMigrating(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	if len(notification) < 2 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("MIGRATING", notification))
		return ErrInvalidNotification
	}

	if handlerCtx.Conn == nil {
		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("MIGRATING"))
		return ErrInvalidNotification
	}

	conn, ok := handlerCtx.Conn.(*pool.Conn)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("MIGRATING", handlerCtx.Conn, handlerCtx))
		return ErrInvalidNotification
	}

	// Apply relaxed timeout to this specific connection
	// (the same relaxed value is used for both read and write timeouts)
	if internal.LogLevel.InfoOrAbove() {
		internal.Logger.Printf(ctx, logs.RelaxedTimeoutDueToNotification(conn.GetID(), "MIGRATING", snh.manager.config.RelaxedTimeout))
	}
	conn.SetRelaxedTimeout(snh.manager.config.RelaxedTimeout, snh.manager.config.RelaxedTimeout)

	// Record relaxed timeout metric
	if relaxedTimeoutCallback := pool.GetMetricConnectionRelaxedTimeoutCallback(); relaxedTimeoutCallback != nil {
		relaxedTimeoutCallback(ctx, 1, conn, PoolNameMain, "MIGRATING")
	}

	return nil
}

// handleMigrated processes
// MIGRATED indicates that a connection migration has completed.
// This is a per-connection notification that clears relaxed timeouts.
// Expected format: ["MIGRATED", ...]
func (snh *NotificationHandler) handleMigrated(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	if len(notification) < 2 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("MIGRATED", notification))
		return ErrInvalidNotification
	}

	if handlerCtx.Conn == nil {
		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("MIGRATED"))
		return ErrInvalidNotification
	}

	conn, ok := handlerCtx.Conn.(*pool.Conn)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("MIGRATED", handlerCtx.Conn, handlerCtx))
		return ErrInvalidNotification
	}

	// Clear relaxed timeout for this specific connection
	if internal.LogLevel.InfoOrAbove() {
		connID := conn.GetID()
		internal.Logger.Printf(ctx, logs.UnrelaxedTimeout(connID))
	}
	conn.ClearRelaxedTimeout()
	return nil
}

// handleFailingOver processes FAILING_OVER notifications.
// FAILING_OVER indicates that a failover is starting.
// This is a per-connection notification that applies relaxed timeouts.
// Expected format: ["FAILING_OVER", ...]
func (snh *NotificationHandler) handleFailingOver(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	if len(notification) < 2 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("FAILING_OVER", notification))
		return ErrInvalidNotification
	}

	if handlerCtx.Conn == nil {
		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("FAILING_OVER"))
		return ErrInvalidNotification
	}

	conn, ok := handlerCtx.Conn.(*pool.Conn)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("FAILING_OVER", handlerCtx.Conn, handlerCtx))
		return ErrInvalidNotification
	}

	// Apply relaxed timeout to this specific connection
	// (the same relaxed value is used for both read and write timeouts)
	if internal.LogLevel.InfoOrAbove() {
		connID := conn.GetID()
		internal.Logger.Printf(ctx, logs.RelaxedTimeoutDueToNotification(connID, "FAILING_OVER", snh.manager.config.RelaxedTimeout))
	}
	conn.SetRelaxedTimeout(snh.manager.config.RelaxedTimeout, snh.manager.config.RelaxedTimeout)

	// Record relaxed timeout metric
	if relaxedTimeoutCallback := pool.GetMetricConnectionRelaxedTimeoutCallback(); relaxedTimeoutCallback != nil {
		relaxedTimeoutCallback(ctx, 1, conn, PoolNameMain, "FAILING_OVER")
	}

	return nil
}

// handleFailedOver processes FAILED_OVER notifications.
// FAILED_OVER indicates that a failover has completed.
// This is a per-connection notification that clears relaxed timeouts.
// Expected format: ["FAILED_OVER", ...]
func (snh *NotificationHandler) handleFailedOver(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	if len(notification) < 2 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("FAILED_OVER", notification))
		return ErrInvalidNotification
	}

	if handlerCtx.Conn == nil {
		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("FAILED_OVER"))
		return ErrInvalidNotification
	}

	conn, ok := handlerCtx.Conn.(*pool.Conn)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("FAILED_OVER", handlerCtx.Conn, handlerCtx))
		return ErrInvalidNotification
	}

	// Clear relaxed timeout for this specific connection
	if internal.LogLevel.InfoOrAbove() {
		connID := conn.GetID()
		internal.Logger.Printf(ctx, logs.UnrelaxedTimeout(connID))
	}
	conn.ClearRelaxedTimeout()
	return nil
}

// handleSMigrating processes SMIGRATING notifications.
// SMIGRATING indicates that a cluster slot is in the process of migrating to a different node.
// This is a per-connection notification that applies relaxed timeouts during slot migration.
// Expected format: ["SMIGRATING", SeqID, slot/range1-range2, ...]
func (snh *NotificationHandler) handleSMigrating(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	if len(notification) < 3 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATING", notification))
		return ErrInvalidNotification
	}

	// Validate SeqID (position 1)
	if _, ok := notification[1].(int64); !ok {
		internal.Logger.Printf(ctx, logs.InvalidSeqIDInSMigratingNotification(notification[1]))
		return ErrInvalidNotification
	}

	if handlerCtx.Conn == nil {
		internal.Logger.Printf(ctx, logs.NoConnectionInHandlerContext("SMIGRATING"))
		return ErrInvalidNotification
	}

	conn, ok := handlerCtx.Conn.(*pool.Conn)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidConnectionTypeInHandlerContext("SMIGRATING", handlerCtx.Conn, handlerCtx))
		return ErrInvalidNotification
	}

	// Apply relaxed timeout to this specific connection
	// (the same relaxed value is used for both read and write timeouts)
	if internal.LogLevel.InfoOrAbove() {
		internal.Logger.Printf(ctx, logs.RelaxedTimeoutDueToNotification(conn.GetID(), "SMIGRATING", snh.manager.config.RelaxedTimeout))
	}
	conn.SetRelaxedTimeout(snh.manager.config.RelaxedTimeout, snh.manager.config.RelaxedTimeout)
	return nil
}

// handleSMigrated processes SMIGRATED notifications.
// SMIGRATED indicates that a cluster slot has finished migrating to a different node.
// This is a cluster-level notification that triggers cluster state reload.
//
// Expected RESP3 format:
//
//	>3
//	+SMIGRATED
//	:SeqID
//	*                  <- array of triplet arrays
//	  *3               <- each triplet is a 3-element array
//	    +              <- node from which slots are migrating FROM
//	    +              <- node to which slots are migrating TO
//	    +              <- comma-separated slots and/or ranges (e.g., "123,789-1000")
//
// A source and target endpoint may appear in multiple triplets.
// The notification is only processed if the connection's NodeAddress matches one of the source endpoints.
//
// Note: Multiple connections may receive the same notification, so we deduplicate by SeqID before triggering reload.
// but we still process the notification on each connection to clear the relaxed timeout.
// In the case when the connection is from MOVED/ASK, the connection's original endpoint is not set,
// so we will not be able to match the source endpoint. In such case, we will trigger the reload callback with the first target endpoint.
func (snh *NotificationHandler) handleSMigrated(ctx context.Context, handlerCtx push.NotificationHandlerContext, notification []interface{}) error {
	// Expected: ["SMIGRATED", SeqID, [[source, target, slots], ...]]
	// Minimum 3 elements: SMIGRATED, SeqID, and the array of triplets
	if len(notification) < 3 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED", notification))
		return ErrInvalidNotification
	}

	// Extract SeqID (position 1)
	seqID, ok := notification[1].(int64)
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidSeqIDInSMigratedNotification(notification[1]))
		return ErrInvalidNotification
	}

	// Extract the array of triplets (position 2)
	triplets, ok := notification[2].([]interface{})
	if !ok {
		internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED (triplets array)", notification[2]))
		return ErrInvalidNotification
	}

	if len(triplets) == 0 {
		internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED (empty triplets)", notification))
		return ErrInvalidNotification
	}

	// Get the connection's endpoints to check if this notification is relevant
	// We check against both nodeAddress (from CLUSTER SLOTS) and addr (after resolution)
	// since we cannot be certain which format the notification source will use
	var connectionNodeAddress string
	var connectionAddr string
	if snh.manager.options != nil {
		connectionNodeAddress = snh.manager.options.GetNodeAddress()
		connectionAddr = snh.manager.options.GetAddr()
	}

	// Helper function to check if source matches either of our endpoints
	// notification source can be either the node address or the addr after resolution
	sourceMatchesConnection := func(source string) bool {
		if source == connectionNodeAddress {
			return true
		}
		if source == connectionAddr {
			return true
		}
		return false
	}

	// Parse triplets and check if any source matches our connection's endpoints
	var matchingTriplets []struct {
		source string
		target string
		slots  string
	}
	var allSlotRanges []string

	for _, tripletInterface := range triplets {
		// Each triplet should be a 3-element array: [source, target, slots]
		triplet, ok := tripletInterface.([]interface{})
		if !ok || len(triplet) != 3 {
			// Malformed triplets are logged and skipped rather than failing the whole notification
			internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED (triplet format)", tripletInterface))
			continue
		}

		// Extract source endpoint
		source, ok := triplet[0].(string)
		if !ok {
			internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED (source)", triplet[0]))
			continue
		}

		// Extract target endpoint
		target, ok := triplet[1].(string)
		if !ok {
			internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED (target)", triplet[1]))
			continue
		}

		// Extract slots
		slots, ok := triplet[2].(string)
		if !ok {
			internal.Logger.Printf(ctx, logs.InvalidNotification("SMIGRATED (slots)", triplet[2]))
			continue
		}

		// Check if this triplet's source matches our connection's endpoints
		if sourceMatchesConnection(source) {
			matchingTriplets = append(matchingTriplets, struct {
				source string
				target string
				slots  string
			}{source, target, slots})
			slotRanges := strings.Split(slots, ",")
			allSlotRanges = append(allSlotRanges, slotRanges...)
		}
	}

	var connID uint64
	// Reset relaxed timeout for this specific connection
	// (done unconditionally, even when no triplet matches — see function comment)
	if handlerCtx.Conn != nil {
		conn, ok := handlerCtx.Conn.(*pool.Conn)
		if ok {
			if internal.LogLevel.InfoOrAbove() {
				connID = conn.GetID()
				internal.Logger.Printf(ctx, logs.UnrelaxedTimeout(connID))
			}
			conn.ClearRelaxedTimeout()
		}
	}

	// If no matching triplets, this notification is not relevant to this connection
	if len(matchingTriplets) == 0 {
		return nil
	}

	// Deduplicate by SeqID - multiple connections may receive the same notification
	// Only trigger cluster state reload once per seqID
	if snh.manager.MarkSMigratedSeqIDProcessed(seqID) {
		// Use the first matching triplet
		target := matchingTriplets[0].target
		slotsForLog := allSlotRanges

		if internal.LogLevel.InfoOrAbove() {
			internal.Logger.Printf(ctx, logs.TriggeringClusterStateReload(seqID, target, slotsForLog))
		}

		// Trigger cluster state reload via callback
		snh.manager.TriggerClusterStateReload(ctx, target, slotsForLog)
	}

	return nil
}
diff --git a/vendor/github.com/redis/go-redis/v9/maintnotifications/state.go b/vendor/github.com/redis/go-redis/v9/maintnotifications/state.go
new file mode 100644
index 00000000..8180bcd9
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/maintnotifications/state.go
@@ -0,0 +1,24 @@
package maintnotifications

// State represents the current state of a maintenance operation
type State int

const (
	// StateIdle indicates no upgrade is in progress
	StateIdle State = iota

	// StateMoving indicates a connection handoff (MOVING) is in progress
	StateMoving
)

// String returns a string representation of the state.
+func (s State) String() string { + switch s { + case StateIdle: + return "idle" + case StateMoving: + return "moving" + default: + return "unknown" + } +} diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go index dff52ae8..5db27102 100644 --- a/vendor/github.com/redis/go-redis/v9/options.go +++ b/vendor/github.com/redis/go-redis/v9/options.go @@ -11,11 +11,27 @@ import ( "sort" "strconv" "strings" + "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/util" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) +// poolIDCounter is a global auto-increment counter for generating unique pool IDs. +var poolIDCounter atomic.Uint64 + +// generateUniqueID generates a short unique identifier for pool names using auto-increment. +// This makes it easier to identify and track pools in order of creation. +func generateUniqueID() string { + id := poolIDCounter.Add(1) + return strconv.FormatUint(id, 10) +} + // Limiter is the interface of a rate limiter or a circuit breaker. type Limiter interface { // Allow returns nil if operation is allowed or an error otherwise. @@ -29,12 +45,25 @@ type Limiter interface { // Options keeps the settings to set up redis connection. type Options struct { - // The network type, either tcp or unix. - // Default is tcp. + // Network type, either tcp or unix. + // + // default: is tcp. Network string - // host:port address. + + // Addr is the address formated as host:port Addr string + // NodeAddress is the address of the Redis node as reported by the server. + // For cluster clients, this is the exact endpoint string returned by CLUSTER SLOTS + // before any resolution or transformation (e.g., loopback replacement). + // For standalone clients, this defaults to Addr. 
+ // + // This is used to match the source endpoint in maintenance notifications + // (e.g. SMIGRATED). + // + // Use Client.NodeAddress() to access this value. + NodeAddress string + // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. ClientName string @@ -46,107 +75,235 @@ type Options struct { OnConnect func(ctx context.Context, cn *Conn) error // Protocol 2 or 3. Use the version to negotiate RESP version with redis-server. - // Default is 3. + // + // default: 3. Protocol int - // Use the specified Username to authenticate the current connection + + // Username is used to authenticate the current connection // with one of the connections defined in the ACL list when connecting // to a Redis 6.0 instance, or greater, that is using the Redis ACL system. Username string - // Optional password. Must match the password specified in the - // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower), + + // Password is an optional password. Must match the password specified in the + // `requirepass` server configuration option (if connecting to a Redis 5.0 instance, or lower), // or the User Password when connecting to a Redis 6.0 instance, or greater, // that is using the Redis ACL system. Password string + // CredentialsProvider allows the username and password to be updated // before reconnecting. It should return the current username and password. CredentialsProvider func() (username string, password string) - // Database to be selected after connecting to the server. + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. 
+ CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. + StreamingCredentialsProvider auth.StreamingCredentialsProvider + + // DB is the database to be selected after connecting to the server. DB int - // Maximum number of retries before giving up. - // Default is 3 retries; -1 (not 0) disables retries. + // MaxRetries is the maximum number of retries before giving up. + // -1 (not 0) disables retries. + // + // default: 3 retries MaxRetries int - // Minimum backoff between each retry. - // Default is 8 milliseconds; -1 disables backoff. + + // MinRetryBackoff is the minimum backoff between each retry. + // -1 disables backoff. + // + // default: 8 milliseconds MinRetryBackoff time.Duration - // Maximum backoff between each retry. - // Default is 512 milliseconds; -1 disables backoff. + + // MaxRetryBackoff is the maximum backoff between each retry. + // -1 disables backoff. + // default: 512 milliseconds; MaxRetryBackoff time.Duration - // Dial timeout for establishing new connections. - // Default is 5 seconds. + // DialTimeout for establishing new connections. + // + // default: 5 seconds DialTimeout time.Duration - // Timeout for socket reads. If reached, commands will fail + + // DialerRetries is the maximum number of retry attempts when dialing fails. + // + // default: 5 + DialerRetries int + + // DialerRetryTimeout is the backoff duration between retry attempts. + // + // default: 100 milliseconds + DialerRetryTimeout time.Duration + + // ReadTimeout for socket reads. If reached, commands will fail // with a timeout instead of blocking. 
Supported values: - // - `0` - default timeout (3 seconds). - // - `-1` - no timeout (block indefinitely). - // - `-2` - disables SetReadDeadline calls completely. + // + // - `-1` - no timeout (block indefinitely). + // - `-2` - disables SetReadDeadline calls completely. + // + // default: 3 seconds ReadTimeout time.Duration - // Timeout for socket writes. If reached, commands will fail + + // WriteTimeout for socket writes. If reached, commands will fail // with a timeout instead of blocking. Supported values: - // - `0` - default timeout (3 seconds). - // - `-1` - no timeout (block indefinitely). - // - `-2` - disables SetWriteDeadline calls completely. + // + // - `-1` - no timeout (block indefinitely). + // - `-2` - disables SetWriteDeadline calls completely. + // + // default: 3 seconds WriteTimeout time.Duration + // ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines. // See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts ContextTimeoutEnabled bool - // Type of connection pool. - // true for FIFO pool, false for LIFO pool. + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + + // PoolFIFO type of connection pool. + // + // - true for FIFO pool + // - false for LIFO pool. + // // Note that FIFO has slightly higher overhead compared to LIFO, // but it helps closing idle connections faster reducing the pool size. 
+ // default: false PoolFIFO bool - // Base number of socket connections. + + // PoolSize is the base number of socket connections. // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS. // If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize, // you can limit it through MaxActiveConns + // + // default: 10 * runtime.GOMAXPROCS(0) PoolSize int - // Amount of time client waits for connection if all connections + + // MaxConcurrentDials is the maximum number of concurrent connection creation goroutines. + // If <= 0, defaults to PoolSize. If > PoolSize, it will be capped at PoolSize. + MaxConcurrentDials int + + // PoolTimeout is the amount of time client waits for connection if all connections // are busy before returning an error. - // Default is ReadTimeout + 1 second. + // + // default: ReadTimeout + 1 second PoolTimeout time.Duration - // Minimum number of idle connections which is useful when establishing - // new connection is slow. - // Default is 0. the idle connections are not closed by default. + + // MinIdleConns is the minimum number of idle connections which is useful when establishing + // new connection is slow. The idle connections are not closed by default. + // + // default: 0 MinIdleConns int - // Maximum number of idle connections. - // Default is 0. the idle connections are not closed by default. + + // MaxIdleConns is the maximum number of idle connections. + // The idle connections are not closed by default. + // + // default: 0 MaxIdleConns int - // Maximum number of connections allocated by the pool at a given time. + + // MaxActiveConns is the maximum number of connections allocated by the pool at a given time. // When zero, there is no limit on the number of connections in the pool. + // If the pool is full, the next call to Get() will block until a connection is released. 
+ // + // default: 0 MaxActiveConns int + // ConnMaxIdleTime is the maximum amount of time a connection may be idle. // Should be less than server's timeout. // // Expired connections may be closed lazily before reuse. // If d <= 0, connections are not closed due to a connection's idle time. + // -1 disables idle timeout check. // - // Default is 30 minutes. -1 disables idle timeout check. + // default: 30 minutes ConnMaxIdleTime time.Duration + // ConnMaxLifetime is the maximum amount of time a connection may be reused. // // Expired connections may be closed lazily before reuse. // If <= 0, connections are not closed due to a connection's age. // - // Default is to not close idle connections. + // default: 0 ConnMaxLifetime time.Duration - // TLS Config to use. When set, TLS will be negotiated. + // ConnMaxLifetimeJitter is the absolute jitter duration applied to ConnMaxLifetime + // to prevent all connections from expiring simultaneously. + // + // The jitter is applied as a random offset in the range [-jitter, +jitter]. + // For example, if ConnMaxLifetime is 1 hour and ConnMaxLifetimeJitter is 6 minutes, + // connections will expire between 54 minutes and 66 minutes. + // + // If <= 0, no jitter is applied. + // If > ConnMaxLifetime, it will be capped at ConnMaxLifetime. + // + // default: 0 + ConnMaxLifetimeJitter time.Duration + + // TLSConfig to use. When set, TLS will be negotiated. TLSConfig *tls.Config // Limiter interface used to implement circuit breaker or rate limiter. Limiter Limiter - // Enables read only queries on slave/follower nodes. + // readOnly enables read only queries on slave/follower nodes. readOnly bool - // Disable set-lib on connect. Default is false. + // DisableIndentity - Disable set-lib on connect. + // + // default: false + // + // Deprecated: Use DisableIdentity instead. DisableIndentity bool + // DisableIdentity is used to disable CLIENT SETINFO command on connect. 
+ // + // default: false + DisableIdentity bool + // Add suffix to client name. Default is empty. + // IdentitySuffix - add suffix to client name. IdentitySuffix string + + // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. + // When unstable mode is enabled, the client will use RESP3 protocol and only be able to use RawResult + UnstableResp3 bool + + // Push notifications are always enabled for RESP3 connections (Protocol: 3) + // and are not available for RESP2 connections. No configuration option is needed. + + // PushNotificationProcessor is the processor for handling push notifications. + // If nil, a default processor will be created for RESP3 connections. + PushNotificationProcessor push.NotificationProcessor + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Default is 15 seconds. + FailingTimeoutSeconds int + + // MaintNotificationsConfig provides custom configuration for maintnotifications. + // When MaintNotificationsConfig.Mode is not "disabled", the client will handle + // cluster upgrade notifications gracefully and manage connection/pool state + // transitions seamlessly. Requires Protocol: 3 (RESP3) for push notifications. + // If nil, maintnotifications are in "auto" mode and will be enabled if the server supports it. + MaintNotificationsConfig *maintnotifications.Config } func (opt *Options) init() { @@ -160,15 +317,41 @@ func (opt *Options) init() { opt.Network = "tcp" } } + // For standalone clients, default NodeAddress to Addr if not set. + // This ensures maintenance notifications (SMIGRATED, etc.) can match + // the connection's endpoint even for non-cluster clients. 
+ if opt.NodeAddress == "" { + opt.NodeAddress = opt.Addr + } + if opt.Protocol < 2 { + opt.Protocol = 3 + } if opt.DialTimeout == 0 { opt.DialTimeout = 5 * time.Second } + if opt.DialerRetries == 0 { + opt.DialerRetries = 5 + } + if opt.DialerRetryTimeout == 0 { + opt.DialerRetryTimeout = 100 * time.Millisecond + } if opt.Dialer == nil { opt.Dialer = NewDialer(opt) } if opt.PoolSize == 0 { opt.PoolSize = 10 * runtime.GOMAXPROCS(0) } + if opt.MaxConcurrentDials <= 0 { + opt.MaxConcurrentDials = opt.PoolSize + } else if opt.MaxConcurrentDials > opt.PoolSize { + opt.MaxConcurrentDials = opt.PoolSize + } + if opt.ReadBufferSize == 0 { + opt.ReadBufferSize = proto.DefaultBufferSize + } + if opt.WriteBufferSize == 0 { + opt.WriteBufferSize = proto.DefaultBufferSize + } switch opt.ReadTimeout { case -2: opt.ReadTimeout = -1 @@ -196,9 +379,12 @@ func (opt *Options) init() { opt.ConnMaxIdleTime = 30 * time.Minute } - if opt.MaxRetries == -1 { + opt.ConnMaxLifetimeJitter = min(opt.ConnMaxLifetimeJitter, opt.ConnMaxLifetime) + + switch opt.MaxRetries { + case -1: opt.MaxRetries = 0 - } else if opt.MaxRetries == 0 { + case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { @@ -213,13 +399,40 @@ func (opt *Options) init() { case 0: opt.MaxRetryBackoff = 512 * time.Millisecond } + + if opt.FailingTimeoutSeconds == 0 { + opt.FailingTimeoutSeconds = 15 + } + + opt.MaintNotificationsConfig = opt.MaintNotificationsConfig.ApplyDefaultsWithPoolConfig(opt.PoolSize, opt.MaxActiveConns) + + // auto-detect endpoint type if not specified + endpointType := opt.MaintNotificationsConfig.EndpointType + if endpointType == "" || endpointType == maintnotifications.EndpointTypeAuto { + // Auto-detect endpoint type if not specified + endpointType = maintnotifications.DetectEndpointType(opt.Addr, opt.TLSConfig != nil) + } + opt.MaintNotificationsConfig.EndpointType = endpointType } func (opt *Options) clone() *Options { clone := *opt + + // Deep clone MaintNotificationsConfig to avoid sharing 
between clients + if opt.MaintNotificationsConfig != nil { + configClone := *opt.MaintNotificationsConfig + clone.MaintNotificationsConfig = &configClone + } + return &clone } +// NewDialer returns a function that will be used as the default dialer +// when none is specified in Options.Dialer. +func (opt *Options) NewDialer() func(context.Context, string, string) (net.Conn, error) { + return NewDialer(opt) +} + // NewDialer returns a function that will be used as the default dialer // when none is specified in Options.Dialer. func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) { @@ -235,7 +448,7 @@ func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, er } } -// ParseURL parses an URL into Options that can be used to connect to Redis. +// ParseURL parses a URL into Options that can be used to connect to Redis. // Scheme is required. // There are two connection types: by tcp socket and by unix socket. // Tcp connection: @@ -250,14 +463,15 @@ func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, er // - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries // - only scalar type fields are supported (bool, int, time.Duration) // - for time.Duration fields, values must be a valid input for time.ParseDuration(); -// additionally a plain integer as value (i.e. without unit) is intepreted as seconds +// additionally a plain integer as value (i.e. 
without unit) is interpreted as seconds // - to disable a duration field, use value less than or equal to 0; to use the default // value, leave the value blank or remove the parameter // - only the last value is interpreted if a parameter is given multiple times // - fields "network", "addr", "username" and "password" can only be set using other -// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error +// - use "skip_verify=true" to ignore TLS certificate validation // // Examples: // @@ -465,6 +679,7 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) { o.MinIdleConns = q.int("min_idle_conns") o.MaxIdleConns = q.int("max_idle_conns") o.MaxActiveConns = q.int("max_active_conns") + o.MaxConcurrentDials = q.int("max_concurrent_dials") if q.has("conn_max_idle_time") { o.ConnMaxIdleTime = q.duration("conn_max_idle_time") } else { @@ -475,9 +690,15 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) { } else { o.ConnMaxLifetime = q.duration("max_conn_age") } + if q.has("conn_max_lifetime_jitter") { + o.ConnMaxLifetimeJitter = min(q.duration("conn_max_lifetime_jitter"), o.ConnMaxLifetime) + } if q.err != nil { return nil, q.err } + if o.TLSConfig != nil && q.has("skip_verify") { + o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify") + } // any parameters left? 
if r := q.remaining(); len(r) > 0 { @@ -501,18 +722,94 @@ func getUserPassword(u *url.URL) (string, string) { func newConnPool( opt *Options, dialer func(ctx context.Context, network, addr string) (net.Conn, error), -) *pool.ConnPool { + poolName string, +) (*pool.ConnPool, error) { + poolSize, err := util.SafeIntToInt32(opt.PoolSize, "PoolSize") + if err != nil { + return nil, err + } + + minIdleConns, err := util.SafeIntToInt32(opt.MinIdleConns, "MinIdleConns") + if err != nil { + return nil, err + } + + maxIdleConns, err := util.SafeIntToInt32(opt.MaxIdleConns, "MaxIdleConns") + if err != nil { + return nil, err + } + + maxActiveConns, err := util.SafeIntToInt32(opt.MaxActiveConns, "MaxActiveConns") + if err != nil { + return nil, err + } + return pool.NewConnPool(&pool.Options{ Dialer: func(ctx context.Context) (net.Conn, error) { return dialer(ctx, opt.Network, opt.Addr) }, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, - }) + PoolFIFO: opt.PoolFIFO, + PoolSize: poolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + MinIdleConns: minIdleConns, + MaxIdleConns: maxIdleConns, + MaxActiveConns: maxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ConnMaxLifetimeJitter: opt.ConnMaxLifetimeJitter, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + PushNotificationsEnabled: opt.Protocol == 3, + Name: poolName, + }), nil +} + +func newPubSubPool( + opt *Options, + dialer func(ctx context.Context, network, addr string) (net.Conn, error), + poolName string, +) (*pool.PubSubPool, error) { + poolSize, err := 
util.SafeIntToInt32(opt.PoolSize, "PoolSize") + if err != nil { + return nil, err + } + + minIdleConns, err := util.SafeIntToInt32(opt.MinIdleConns, "MinIdleConns") + if err != nil { + return nil, err + } + + maxIdleConns, err := util.SafeIntToInt32(opt.MaxIdleConns, "MaxIdleConns") + if err != nil { + return nil, err + } + + maxActiveConns, err := util.SafeIntToInt32(opt.MaxActiveConns, "MaxActiveConns") + if err != nil { + return nil, err + } + + return pool.NewPubSubPool(&pool.Options{ + PoolFIFO: opt.PoolFIFO, + PoolSize: poolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + MinIdleConns: minIdleConns, + MaxIdleConns: maxIdleConns, + MaxActiveConns: maxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ConnMaxLifetimeJitter: opt.ConnMaxLifetimeJitter, + ReadBufferSize: 32 * 1024, + WriteBufferSize: 32 * 1024, + PushNotificationsEnabled: opt.Protocol == 3, + Name: poolName, + }, dialer), nil } diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go index 9e5eb046..6fb51dc2 100644 --- a/vendor/github.com/redis/go-redis/v9/osscluster.go +++ b/vendor/github.com/redis/go-redis/v9/osscluster.go @@ -3,6 +3,7 @@ package redis import ( "context" "crypto/tls" + "errors" "fmt" "math" "net" @@ -14,14 +15,27 @@ import ( "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" + "github.com/redis/go-redis/v9/internal/otel" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" "github.com/redis/go-redis/v9/internal/rand" + "github.com/redis/go-redis/v9/internal/routing" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) -var errClusterNoNodes = fmt.Errorf("redis: 
cluster has no nodes") +const ( + minLatencyMeasurementInterval = 10 * time.Second +) + +var ( + errClusterNoNodes = errors.New("redis: cluster has no nodes") + errNoWatchKeys = errors.New("redis: Watch requires at least one key") + errWatchCrosslot = errors.New("redis: Watch requires all keys to be in the same slot") +) // ClusterOptions are used to configure a cluster client and should be // passed to NewClusterClient. @@ -33,6 +47,7 @@ type ClusterOptions struct { ClientName string // NewClient creates a cluster node client with provided name and options. + // If NewClient is set by the user, the user is responsible for handling maintnotifications upgrades and push notifications. NewClient func(opt *Options) *Client // The maximum number of retries before giving up. Command is retried @@ -62,38 +77,120 @@ type ClusterOptions struct { OnConnect func(ctx context.Context, cn *Conn) error - Protocol int - Username string - Password string + Protocol int + Username string + Password string + CredentialsProvider func() (username string, password string) + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + StreamingCredentialsProvider auth.StreamingCredentialsProvider + // MaxRetries is the maximum number of retries before giving up. + // For ClusterClient, retries are disabled by default (set to -1), + // because the cluster client handles all kinds of retries internally. + // This is intentional and differs from the standalone Options default. MaxRetries int MinRetryBackoff time.Duration MaxRetryBackoff time.Duration - DialTimeout time.Duration + DialTimeout time.Duration + + // DialerRetries is the maximum number of retry attempts when dialing fails. + // + // default: 5 + DialerRetries int + + // DialerRetryTimeout is the backoff duration between retry attempts. 
+ // + // default: 100 milliseconds + DialerRetryTimeout time.Duration + ReadTimeout time.Duration WriteTimeout time.Duration ContextTimeoutEnabled bool - PoolFIFO bool - PoolSize int // applies per cluster node and not for the whole cluster - PoolTimeout time.Duration - MinIdleConns int - MaxIdleConns int - MaxActiveConns int // applies per cluster node and not for the whole cluster - ConnMaxIdleTime time.Duration - ConnMaxLifetime time.Duration + // MaxConcurrentDials is the maximum number of concurrent connection creation goroutines. + // If <= 0, defaults to PoolSize. If > PoolSize, it will be capped at PoolSize. + MaxConcurrentDials int - TLSConfig *tls.Config - DisableIndentity bool // Disable set-lib on connect. Default is false. + PoolFIFO bool + PoolSize int // applies per cluster node and not for the whole cluster + PoolTimeout time.Duration + MinIdleConns int + MaxIdleConns int + MaxActiveConns int // applies per cluster node and not for the whole cluster + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration + ConnMaxLifetimeJitter time.Duration + + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + + TLSConfig *tls.Config + + // DisableRoutingPolicies disables the request/response policy routing system. + // When disabled, all commands use the legacy routing behavior. + // Experimental. Will be removed when shard picker is fully implemented. 
+ DisableRoutingPolicies bool + + // DisableIndentity - Disable set-lib on connect. + // + // default: false + // + // Deprecated: Use DisableIdentity instead. + DisableIndentity bool + + // DisableIdentity is used to disable CLIENT SETINFO command on connect. + // + // default: false + DisableIdentity bool IdentitySuffix string // Add suffix to client name. Default is empty. + + // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. + UnstableResp3 bool + + // PushNotificationProcessor is the processor for handling push notifications. + // If nil, a default processor will be created for RESP3 connections. + PushNotificationProcessor push.NotificationProcessor + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Default is 15 seconds. + FailingTimeoutSeconds int + + // MaintNotificationsConfig provides custom configuration for maintnotifications upgrades. + // When MaintNotificationsConfig.Mode is not "disabled", the client will handle + // cluster upgrade notifications gracefully and manage connection/pool state + // transitions seamlessly. Requires Protocol: 3 (RESP3) for push notifications. + // If nil, maintnotifications upgrades are in "auto" mode and will be enabled if the server supports it. + // The ClusterClient supports SMIGRATING and SMIGRATED notifications for cluster state management. + // Individual node clients handle other maintenance notifications (MOVING, MIGRATING, etc.). + MaintNotificationsConfig *maintnotifications.Config + // ShardPicker is used to pick a shard when the request_policy is + // ReqDefault and the command has no keys. + ShardPicker routing.ShardPicker + + // ClusterStateReloadInterval is the interval for reloading the cluster state. + // Default is 10 seconds. 
+ ClusterStateReloadInterval time.Duration } func (opt *ClusterOptions) init() { - if opt.MaxRedirects == -1 { + switch opt.MaxRedirects { + case -1: opt.MaxRedirects = 0 - } else if opt.MaxRedirects == 0 { + case 0: opt.MaxRedirects = 3 } @@ -101,9 +198,30 @@ func (opt *ClusterOptions) init() { opt.ReadOnly = true } + if opt.DialTimeout == 0 { + opt.DialTimeout = 5 * time.Second + } + if opt.DialerRetries == 0 { + opt.DialerRetries = 5 + } + if opt.DialerRetryTimeout == 0 { + opt.DialerRetryTimeout = 100 * time.Millisecond + } + if opt.PoolSize == 0 { opt.PoolSize = 5 * runtime.GOMAXPROCS(0) } + if opt.MaxConcurrentDials <= 0 { + opt.MaxConcurrentDials = opt.PoolSize + } else if opt.MaxConcurrentDials > opt.PoolSize { + opt.MaxConcurrentDials = opt.PoolSize + } + if opt.ReadBufferSize == 0 { + opt.ReadBufferSize = proto.DefaultBufferSize + } + if opt.WriteBufferSize == 0 { + opt.WriteBufferSize = proto.DefaultBufferSize + } switch opt.ReadTimeout { case -1: @@ -137,6 +255,18 @@ func (opt *ClusterOptions) init() { if opt.NewClient == nil { opt.NewClient = NewClient } + + if opt.FailingTimeoutSeconds == 0 { + opt.FailingTimeoutSeconds = 15 + } + + if opt.ShardPicker == nil { + opt.ShardPicker = &routing.RoundRobinPicker{} + } + + if opt.ClusterStateReloadInterval == 0 { + opt.ClusterStateReloadInterval = 10 * time.Second + } } // ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis. @@ -156,12 +286,12 @@ func (opt *ClusterOptions) init() { // - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries // - only scalar type fields are supported (bool, int, time.Duration) // - for time.Duration fields, values must be a valid input for time.ParseDuration(); -// additionally a plain integer as value (i.e. without unit) is intepreted as seconds +// additionally a plain integer as value (i.e. 
without unit) is interpreted as seconds // - to disable a duration field, use value less than or equal to 0; to use the default // value, leave the value blank or remove the parameter // - only the last value is interpreted if a parameter is given multiple times // - fields "network", "addr", "username" and "password" can only be set using other -// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error // @@ -231,16 +361,23 @@ func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, er o.MinRetryBackoff = q.duration("min_retry_backoff") o.MaxRetryBackoff = q.duration("max_retry_backoff") o.DialTimeout = q.duration("dial_timeout") + o.DialerRetries = q.int("dialer_retries") + o.DialerRetryTimeout = q.duration("dialer_retry_timeout") o.ReadTimeout = q.duration("read_timeout") o.WriteTimeout = q.duration("write_timeout") o.PoolFIFO = q.bool("pool_fifo") o.PoolSize = q.int("pool_size") + o.MaxConcurrentDials = q.int("max_concurrent_dials") o.MinIdleConns = q.int("min_idle_conns") o.MaxIdleConns = q.int("max_idle_conns") o.MaxActiveConns = q.int("max_active_conns") o.PoolTimeout = q.duration("pool_timeout") o.ConnMaxLifetime = q.duration("conn_max_lifetime") + if q.has("conn_max_lifetime_jitter") { + o.ConnMaxLifetimeJitter = min(q.duration("conn_max_lifetime_jitter"), o.ConnMaxLifetime) + } o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + o.FailingTimeoutSeconds = q.int("failing_timeout_seconds") if q.err != nil { return nil, q.err @@ -266,41 +403,63 @@ func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, er } func (opt *ClusterOptions) clientOptions() *Options { + // Clone MaintNotificationsConfig to avoid sharing between cluster node clients + var maintNotificationsConfig *maintnotifications.Config + if 
opt.MaintNotificationsConfig != nil { + configClone := *opt.MaintNotificationsConfig + maintNotificationsConfig = &configClone + } + return &Options{ ClientName: opt.ClientName, Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + ContextTimeoutEnabled: opt.ContextTimeoutEnabled, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, - DisableIndentity: opt.DisableIndentity, - IdentitySuffix: opt.IdentitySuffix, - TLSConfig: opt.TLSConfig, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + MaxActiveConns: opt.MaxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ConnMaxLifetimeJitter: opt.ConnMaxLifetimeJitter, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + DisableIdentity: opt.DisableIdentity, + DisableIndentity: opt.DisableIdentity, + IdentitySuffix: opt.IdentitySuffix, + FailingTimeoutSeconds: opt.FailingTimeoutSeconds, + TLSConfig: 
opt.TLSConfig, // If ClusterSlots is populated, then we probably have an artificial // cluster whose nodes are not in clustering mode (otherwise there isn't // much use for ClusterSlots config). This means we cannot execute the // READONLY command against that node -- setting readOnly to false in such // situations in the options below will prevent that from happening. - readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + UnstableResp3: opt.UnstableResp3, + MaintNotificationsConfig: maintNotificationsConfig, + PushNotificationProcessor: opt.PushNotificationProcessor, } } @@ -312,11 +471,16 @@ type clusterNode struct { latency uint32 // atomic generation uint32 // atomic failing uint32 // atomic + loaded uint32 // atomic + + // last time the latency measurement was performed for the node, stored in nanoseconds from epoch + lastLatencyMeasurement int64 // atomic } -func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { +func newClusterNodeWithNodeAddress(clOpt *ClusterOptions, addr, nodeAddress string) *clusterNode { opt := clOpt.clientOptions() opt.Addr = addr + opt.NodeAddress = nodeAddress node := clusterNode{ Client: clOpt.NewClient(opt), } @@ -337,6 +501,8 @@ func (n *clusterNode) Close() error { return n.Client.Close() } +const maximumNodeLatency = 1 * time.Minute + func (n *clusterNode) updateLatency() { const numProbe = 10 var dur uint64 @@ -357,11 +523,12 @@ func (n *clusterNode) updateLatency() { if successes == 0 { // If none of the pings worked, set latency to some arbitrarily high value so this node gets // least priority. 
- latency = float64((1 * time.Minute) / time.Microsecond) + latency = float64((maximumNodeLatency) / time.Microsecond) } else { latency = float64(dur) / float64(successes) } atomic.StoreUint32(&n.latency, uint32(latency+0.5)) + n.SetLastLatencyMeasurement(time.Now()) } func (n *clusterNode) Latency() time.Duration { @@ -371,10 +538,11 @@ func (n *clusterNode) Latency() time.Duration { func (n *clusterNode) MarkAsFailing() { atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) + atomic.StoreUint32(&n.loaded, 0) } func (n *clusterNode) Failing() bool { - const timeout = 15 // 15 seconds + timeout := int64(n.Client.opt.FailingTimeoutSeconds) failing := atomic.LoadUint32(&n.failing) if failing == 0 { @@ -391,6 +559,10 @@ func (n *clusterNode) Generation() uint32 { return atomic.LoadUint32(&n.generation) } +func (n *clusterNode) LastLatencyMeasurement() int64 { + return atomic.LoadInt64(&n.lastLatencyMeasurement) +} + func (n *clusterNode) SetGeneration(gen uint32) { for { v := atomic.LoadUint32(&n.generation) @@ -400,6 +572,33 @@ func (n *clusterNode) SetGeneration(gen uint32) { } } +func (n *clusterNode) SetLastLatencyMeasurement(t time.Time) { + for { + v := atomic.LoadInt64(&n.lastLatencyMeasurement) + if t.UnixNano() < v || atomic.CompareAndSwapInt64(&n.lastLatencyMeasurement, v, t.UnixNano()) { + break + } + } +} + +func (n *clusterNode) Loading() bool { + loaded := atomic.LoadUint32(&n.loaded) + if loaded == 1 { + return false + } + + // check if the node is loading + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + err := n.Client.Ping(ctx).Err() + loading := err != nil && isLoadingError(err) + if !loading { + atomic.StoreUint32(&n.loaded, 1) + } + return loading +} + //------------------------------------------------------------------------------ type clusterNodes struct { @@ -412,13 +611,12 @@ type clusterNodes struct { closed bool onNewNode []func(rdb *Client) - _generation uint32 // atomic + generation 
uint32 // atomic } func newClusterNodes(opt *ClusterOptions) *clusterNodes { return &clusterNodes{ - opt: opt, - + opt: opt, addrs: opt.Addrs, nodes: make(map[string]*clusterNode), } @@ -459,9 +657,11 @@ func (c *clusterNodes) Addrs() ([]string, error) { closed := c.closed //nolint:ifshort if !closed { if len(c.activeAddrs) > 0 { - addrs = c.activeAddrs + addrs = make([]string, len(c.activeAddrs)) + copy(addrs, c.activeAddrs) } else { - addrs = c.addrs + addrs = make([]string, len(c.addrs)) + copy(addrs, c.addrs) } } c.mu.RUnlock() @@ -476,21 +676,21 @@ func (c *clusterNodes) Addrs() ([]string, error) { } func (c *clusterNodes) NextGeneration() uint32 { - return atomic.AddUint32(&c._generation, 1) + return atomic.AddUint32(&c.generation, 1) } // GC removes unused nodes. func (c *clusterNodes) GC(generation uint32) { - //nolint:prealloc var collected []*clusterNode c.mu.Lock() c.activeAddrs = c.activeAddrs[:0] + now := time.Now() for addr, node := range c.nodes { if node.Generation() >= generation { c.activeAddrs = append(c.activeAddrs, addr) - if c.opt.RouteByLatency { + if c.opt.RouteByLatency && node.LastLatencyMeasurement() < now.Add(-minLatencyMeasurementInterval).UnixNano() { go node.updateLatency() } continue @@ -508,6 +708,10 @@ func (c *clusterNodes) GC(generation uint32) { } func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + return c.GetOrCreateWithNodeAddress(addr, "") +} + +func (c *clusterNodes) GetOrCreateWithNodeAddress(addr, nodeAddress string) (*clusterNode, error) { node, err := c.get(addr) if err != nil { return nil, err @@ -528,28 +732,25 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { return node, nil } - node = newClusterNode(c.opt, addr) + node = newClusterNodeWithNodeAddress(c.opt, addr, nodeAddress) for _, fn := range c.onNewNode { fn(node.Client) } - c.addrs = appendIfNotExists(c.addrs, addr) + c.addrs = appendIfNotExist(c.addrs, addr) c.nodes[addr] = node return node, nil } func (c 
*clusterNodes) get(addr string) (*clusterNode, error) { - var node *clusterNode - var err error c.mu.RLock() + defer c.mu.RUnlock() + if c.closed { - err = pool.ErrClosed - } else { - node = c.nodes[addr] + return nil, pool.ErrClosed } - c.mu.RUnlock() - return node, err + return c.nodes[addr], nil } func (c *clusterNodes) All() ([]*clusterNode, error) { @@ -580,8 +781,9 @@ func (c *clusterNodes) Random() (*clusterNode, error) { //------------------------------------------------------------------------------ type clusterSlot struct { - start, end int - nodes []*clusterNode + start int + end int + nodes []*clusterNode } type clusterSlotSlice []*clusterSlot @@ -627,12 +829,14 @@ func newClusterState( for _, slot := range slots { var nodes []*clusterNode for i, slotNode := range slot.Nodes { - addr := slotNode.Addr + // slotNode.Addr is the node address from CLUSTER SLOTS + nodeAddress := slotNode.Addr + addr := nodeAddress if !isLoopbackOrigin { addr = replaceLoopbackHost(addr, originHost) } - node, err := c.nodes.GetOrCreate(addr) + node, err := c.nodes.GetOrCreateWithNodeAddress(addr, nodeAddress) if err != nil { return nil, err } @@ -641,9 +845,9 @@ func newClusterState( nodes = append(nodes, node) if i == 0 { - c.Masters = appendUniqueNode(c.Masters, node) + c.Masters = appendIfNotExist(c.Masters, node) } else { - c.Slaves = appendUniqueNode(c.Slaves, node) + c.Slaves = appendIfNotExist(c.Slaves, node) } } @@ -682,12 +886,25 @@ func replaceLoopbackHost(nodeAddr, originHost string) string { return net.JoinHostPort(originHost, nodePort) } +// isLoopback returns true if the host is a loopback address. +// For IP addresses, it uses net.IP.IsLoopback(). +// For hostnames, it recognizes well-known loopback hostnames like "localhost" +// and Docker-specific loopback patterns like "*.docker.internal". 
func isLoopback(host string) bool { ip := net.ParseIP(host) - if ip == nil { + if ip != nil { + return ip.IsLoopback() + } + + if strings.ToLower(host) == "localhost" { return true } - return ip.IsLoopback() + + if strings.HasSuffix(strings.ToLower(host), ".docker.internal") { + return true + } + + return false } func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { @@ -706,7 +923,8 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { case 1: return nodes[0], nil case 2: - if slave := nodes[1]; !slave.Failing() { + slave := nodes[1] + if !slave.Failing() && !slave.Loading() { return slave, nil } return nodes[0], nil @@ -715,7 +933,7 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { for i := 0; i < 10; i++ { n := rand.Intn(len(nodes)-1) + 1 slave = nodes[n] - if !slave.Failing() { + if !slave.Failing() && !slave.Loading() { return slave, nil } } @@ -731,20 +949,40 @@ func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { return c.nodes.Random() } - var node *clusterNode + var allNodesFailing = true + var ( + closestNonFailingNode *clusterNode + closestNode *clusterNode + minLatency time.Duration + ) + + // setting the max possible duration as the zero value for minLatency + minLatency = time.Duration(math.MaxInt64) + for _, n := range nodes { - if n.Failing() { - continue + if closestNode == nil || n.Latency() < minLatency { + closestNode = n + minLatency = n.Latency() + if !n.Failing() { + closestNonFailingNode = n + allNodesFailing = false + } } - if node == nil || n.Latency() < node.Latency() { - node = n - } - } - if node != nil { - return node, nil } - // If all nodes are failing - return random node + // pick the healthy node with the lowest latency + if !allNodesFailing && closestNonFailingNode != nil { + return closestNonFailingNode, nil + } + + // if all nodes are failing, we will pick the temporarily failing node with lowest latency + if minLatency < maximumNodeLatency && 
closestNode != nil { + internal.Logger.Printf(context.TODO(), "redis: all nodes are marked as failed, picking the temporarily failing node with lowest latency") + return closestNode, nil + } + + // If all nodes are having the maximum latency(all pings are failing) - return a random node across the cluster + internal.Logger.Printf(context.TODO(), "redis: pings to all nodes are failing, picking a random node across the cluster") return c.nodes.Random() } @@ -765,6 +1003,29 @@ func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { return nodes[randomNodes[0]], nil } +func (c *clusterState) slotShardPickerSlaveNode(slot int, shardPicker routing.ShardPicker) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + // nodes[0] is master, nodes[1:] are slaves + // First, try all slave nodes for this slot using ShardPicker order + slaves := nodes[1:] + if len(slaves) > 0 { + for i := 0; i < len(slaves); i++ { + idx := shardPicker.Next(len(slaves)) + slave := slaves[idx] + if !slave.Failing() && !slave.Loading() { + return slave, nil + } + } + } + + // All slaves are failing or loading - return master + return nodes[0], nil +} + func (c *clusterState) slotNodes(slot int) []*clusterNode { i := sort.Search(len(c.slots), func(i int) bool { return c.slots[i].end >= slot @@ -784,13 +1045,16 @@ func (c *clusterState) slotNodes(slot int) []*clusterNode { type clusterStateHolder struct { load func(ctx context.Context) (*clusterState, error) - state atomic.Value - reloading uint32 // atomic + reloadInterval time.Duration + state atomic.Value + reloading uint32 // atomic + reloadPending uint32 // atomic - set to 1 when reload is requested during active reload } -func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { +func newClusterStateHolder(load func(ctx context.Context) (*clusterState, error), reloadInterval time.Duration) *clusterStateHolder { return 
&clusterStateHolder{ - load: fn, + load: load, + reloadInterval: reloadInterval, } } @@ -804,17 +1068,37 @@ func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) } func (c *clusterStateHolder) LazyReload() { + // If already reloading, mark that another reload is pending if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + atomic.StoreUint32(&c.reloadPending, 1) return } - go func() { - defer atomic.StoreUint32(&c.reloading, 0) - _, err := c.Reload(context.Background()) - if err != nil { - return + go func() { + for { + _, err := c.Reload(context.Background()) + if err != nil { + atomic.StoreUint32(&c.reloadPending, 0) + atomic.StoreUint32(&c.reloading, 0) + return + } + + // Clear pending flag after reload completes, before cooldown + // This captures notifications that arrived during the reload + atomic.StoreUint32(&c.reloadPending, 0) + + // Wait cooldown period + time.Sleep(200 * time.Millisecond) + + // Check if another reload was requested during cooldown + if atomic.LoadUint32(&c.reloadPending) == 0 { + // No pending reload, we're done + atomic.StoreUint32(&c.reloading, 0) + return + } + + // Pending reload requested, loop to reload again } - time.Sleep(200 * time.Millisecond) }() } @@ -825,7 +1109,7 @@ func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { } state := v.(*clusterState) - if time.Since(state.createdAt) > 10*time.Second { + if time.Since(state.createdAt) > c.reloadInterval { c.LazyReload() } return state, nil @@ -845,10 +1129,11 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er // or more underlying connections. It's safe for concurrent use by // multiple goroutines. 
type ClusterClient struct { - opt *ClusterOptions - nodes *clusterNodes - state *clusterStateHolder - cmdsInfoCache *cmdsInfoCache + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + cmdInfoResolver *commandInfoResolver cmdable hooksMixin } @@ -863,10 +1148,13 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient { nodes: newClusterNodes(opt), } - c.state = newClusterStateHolder(c.loadState) c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) - c.cmdable = c.Process + c.state = newClusterStateHolder(c.loadState, opt.ClusterStateReloadInterval) + + c.SetCommandInfoResolver(NewDefaultCommandPolicyResolver()) + + c.cmdable = c.Process c.initHooks(hooks{ dial: nil, process: c.process, @@ -874,6 +1162,26 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient { txPipeline: c.processTxPipeline, }) + // Set up SMIGRATED notification handling for cluster state reload + // When a node client receives a SMIGRATED notification, it should trigger + // cluster state reload on the parent ClusterClient + if opt.MaintNotificationsConfig != nil { + c.nodes.OnNewNode(func(nodeClient *Client) { + manager := nodeClient.GetMaintNotificationsManager() + if manager != nil { + manager.SetClusterStateReloadCallback(func(ctx context.Context, hostPort string, slotRanges []string) { + // Log the migration details for now + if internal.LogLevel.InfoOrAbove() { + internal.Logger.Printf(ctx, "cluster: slots %v migrated to %s, reloading cluster state", slotRanges, hostPort) + } + // Currently we reload the entire cluster state + // In the future, this could be optimized to reload only the specific slots + c.state.LazyReload() + }) + } + }) + } + return c } @@ -896,13 +1204,6 @@ func (c *ClusterClient) Close() error { return c.nodes.Close() } -// Do create a Cmd from the args and processes the cmd. -func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) 
- _ = c.Process(ctx, cmd) - return cmd -} - func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) @@ -910,12 +1211,15 @@ func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { } func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd, -1) var node *clusterNode + var moved bool var ask bool var lastErr error for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { + // MOVED and ASK responses are not transient errors that require retry delay; they + // should be attempted immediately. + if attempt > 0 && !moved && !ask { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { return err } @@ -923,7 +1227,11 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { if node == nil { var err error - node, err = c.cmdNode(ctx, cmd.Name(), slot) + if !c.opt.DisableRoutingPolicies && c.opt.ShardPicker != nil { + node, err = c.cmdNodeWithShardPicker(ctx, cmd.Name(), slot, c.opt.ShardPicker) + } else { + node, err = c.cmdNode(ctx, cmd.Name(), slot) + } if err != nil { return err } @@ -931,13 +1239,16 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { if ask { ask = false - pipe := node.Client.Pipeline() _ = pipe.Process(ctx, NewCmd(ctx, "asking")) _ = pipe.Process(ctx, cmd) _, lastErr = pipe.Exec(ctx) } else { - lastErr = node.Client.Process(ctx, cmd) + if !c.opt.DisableRoutingPolicies { + lastErr = c.routeAndRun(ctx, cmd, node) + } else { + lastErr = node.Client.Process(ctx, cmd) + } } // If there is no error - we are done. 
@@ -959,12 +1270,23 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { continue } - var moved bool var addr string moved, ask, addr = isMovedError(lastErr) if moved || ask { c.state.LazyReload() + // Record error metrics + if errorCallback := pool.GetMetricErrorCallback(); errorCallback != nil { + errorType := "MOVED" + statusCode := "MOVED" + if ask { + errorType = "ASK" + statusCode = "ASK" + } + // MOVED/ASK are not internal errors, and this is the first attempt (retry count = 0) + errorCallback(ctx, errorType, nil, statusCode, false, 0) + } + var err error node, err = c.nodes.GetOrCreate(addr) if err != nil { @@ -1183,7 +1505,7 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { continue } - return newClusterState(c.nodes, slots, node.Client.opt.Addr) + return newClusterState(c.nodes, slots, addr) } /* @@ -1212,17 +1534,35 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) } func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { + // Only call time.Now() if pipeline operation duration callback is set to avoid overhead + var operationStart time.Time + pipelineOpDurationCallback := otel.GetPipelineOperationDurationCallback() + if pipelineOpDurationCallback != nil { + operationStart = time.Now() + } + totalAttempts := 0 + cmdsMap := newCmdsMap() if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil { setCmdsErr(cmds, err) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, "PIPELINE", len(cmds), 1, err, nil, 0) + } return err } + var lastErr error for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + totalAttempts++ if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { setCmdsErr(cmds, err) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, 
operationDuration, "PIPELINE", len(cmds), totalAttempts, err, nil, 0) + } return err } } @@ -1243,6 +1583,17 @@ func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error break } cmdsMap = failedCmds + lastErr = cmdsFirstErr(cmds) + } + + // Record pipeline operation duration + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + finalErr := cmdsFirstErr(cmds) + if finalErr == nil { + finalErr = lastErr + } + pipelineOpDurationCallback(ctx, operationDuration, "PIPELINE", len(cmds), totalAttempts, finalErr, nil, 0) } return cmdsFirstErr(cmds) @@ -1256,10 +1607,31 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) - node, err := c.slotReadOnlyNode(state, slot) - if err != nil { - return err + var policy *routing.CommandPolicy + if c.cmdInfoResolver != nil { + policy = c.cmdInfoResolver.GetCommandPolicy(ctx, cmd) + } + if policy != nil && !policy.CanBeUsedInPipeline() { + return fmt.Errorf( + "redis: cannot pipeline command %q with request policy ReqAllNodes/ReqAllShards/ReqMultiShard; Note: This behavior is subject to change in the future", cmd.Name(), + ) + } + slot := c.cmdSlot(cmd, -1) + var node *clusterNode + // For keyless commands (slot == -1), use ShardPicker if routing policies are enabled + if slot == -1 && !c.opt.DisableRoutingPolicies && c.opt.ShardPicker != nil { + if len(state.Masters) == 0 { + return errClusterNoNodes + } + // For read-only keyless commands, pick from all nodes (masters + slaves) + allNodes := append(state.Masters, state.Slaves...) 
+ idx := c.opt.ShardPicker.Next(len(allNodes)) + node = allNodes[idx] + } else { + node, err = c.slotReadOnlyNode(state, slot) + if err != nil { + return err + } } cmdsMap.Add(node, cmd) } @@ -1267,10 +1639,29 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd } for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) - node, err := state.slotMasterNode(slot) - if err != nil { - return err + var policy *routing.CommandPolicy + if c.cmdInfoResolver != nil { + policy = c.cmdInfoResolver.GetCommandPolicy(ctx, cmd) + } + if policy != nil && !policy.CanBeUsedInPipeline() { + return fmt.Errorf( + "redis: cannot pipeline command %q with request policy ReqAllNodes/ReqAllShards/ReqMultiShard; Note: This behavior is subject to change in the future", cmd.Name(), + ) + } + slot := c.cmdSlot(cmd, -1) + var node *clusterNode + // For keyless commands (slot == -1), use ShardPicker if routing policies are enabled + if slot == -1 && !c.opt.DisableRoutingPolicies && c.opt.ShardPicker != nil { + if len(state.Masters) == 0 { + return errClusterNoNodes + } + idx := c.opt.ShardPicker.Next(len(state.Masters)) + node = state.Masters[idx] + } else { + node, err = state.slotMasterNode(slot) + if err != nil { + return err + } } cmdsMap.Add(node, cmd) } @@ -1293,6 +1684,9 @@ func (c *ClusterClient) processPipelineNode( _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { cn, err := node.Client.getConn(ctx) if err != nil { + if !isContextError(err) { + node.MarkAsFailing() + } _ = c.mapCmdsByNode(ctx, failedCmds, cmds) setCmdsErr(cmds, err) return err @@ -1314,6 +1708,9 @@ func (c *ClusterClient) processPipelineNodeConn( if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { + if isBadConn(err, false, node.Client.getAddr()) { + node.MarkAsFailing() + } if shouldRetry(err, true) { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) } @@ -1345,7 
+1742,7 @@ func (c *ClusterClient) pipelineReadCmds( continue } - if c.opt.ReadOnly { + if c.opt.ReadOnly && isBadConn(err, false, node.Client.getAddr()) { node.MarkAsFailing() } @@ -1410,61 +1807,127 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro } func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + // Only call time.Now() if pipeline operation duration callback is set to avoid overhead + var operationStart time.Time + pipelineOpDurationCallback := otel.GetPipelineOperationDurationCallback() + if pipelineOpDurationCallback != nil { + operationStart = time.Now() + } + totalAttempts := 0 + // Trim multi .. exec. cmds = cmds[1 : len(cmds)-1] + if len(cmds) == 0 { + return nil + } + state, err := c.state.Get(ctx) if err != nil { setCmdsErr(cmds, err) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, "MULTI", len(cmds), 1, err, nil, 0) + } return err } - cmdsMap := c.mapCmdsBySlot(ctx, cmds) - for slot, cmds := range cmdsMap { - node, err := state.slotMasterNode(slot) - if err != nil { - setCmdsErr(cmds, err) - continue + keyedCmdsBySlot := c.slottedKeyedCommands(ctx, cmds) + slot := -1 + switch len(keyedCmdsBySlot) { + case 0: + slot = hashtag.RandomSlot() + case 1: + for sl := range keyedCmdsBySlot { + slot = sl + break } + default: + // TxPipeline does not support cross slot transaction. 
+ setCmdsErr(cmds, ErrCrossSlot) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, "MULTI", len(cmds), 1, ErrCrossSlot, nil, 0) + } + return ErrCrossSlot + } - cmdsMap := map[*clusterNode][]Cmder{node: cmds} - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - setCmdsErr(cmds, err) - return err + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, "MULTI", len(cmds), 1, err, nil, 0) + } + return err + } + + var lastErr error + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + totalAttempts++ + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, "MULTI", len(cmds), totalAttempts, err, nil, 0) } + return err } - - failedCmds := newCmdsMap() - var wg sync.WaitGroup - - for node, cmds := range cmdsMap { - wg.Add(1) - go func(node *clusterNode, cmds []Cmder) { - defer wg.Done() - c.processTxPipelineNode(ctx, node, cmds, failedCmds) - }(node, cmds) - } - - wg.Wait() - if len(failedCmds.m) == 0 { - break - } - cmdsMap = failedCmds.m } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + c.processTxPipelineNode(ctx, node, cmds, failedCmds) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + lastErr = cmdsFirstErr(cmds) + } + + if pipelineOpDurationCallback != nil { + 
operationDuration := time.Since(operationStart) + finalErr := cmdsFirstErr(cmds) + if finalErr == nil { + finalErr = lastErr + } + pipelineOpDurationCallback(ctx, operationDuration, "MULTI", len(cmds), totalAttempts, finalErr, nil, 0) } return cmdsFirstErr(cmds) } -func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder { - cmdsMap := make(map[int][]Cmder) +// slottedKeyedCommands returns a map of slot to commands taking into account +// only commands that have keys. +func (c *ClusterClient) slottedKeyedCommands(ctx context.Context, cmds []Cmder) map[int][]Cmder { + cmdsSlots := map[int][]Cmder{} + + prefferedRandomSlot := -1 for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) - cmdsMap[slot] = append(cmdsMap[slot], cmd) + if cmdFirstKeyPos(cmd) == 0 { + continue + } + + slot := c.cmdSlot(cmd, prefferedRandomSlot) + if prefferedRandomSlot == -1 { + prefferedRandomSlot = slot + } + + cmdsSlots[slot] = append(cmdsSlots[slot], cmd) } - return cmdsMap + + return cmdsSlots } func (c *ClusterClient) processTxPipelineNode( @@ -1508,7 +1971,7 @@ func (c *ClusterClient) processTxPipelineNodeConn( trimmedCmds := cmds[1 : len(cmds)-1] if err := c.txPipelineReadQueued( - ctx, rd, statusCmd, trimmedCmds, failedCmds, + ctx, node, cn, rd, statusCmd, trimmedCmds, failedCmds, ); err != nil { setCmdsErr(cmds, err) @@ -1520,30 +1983,56 @@ func (c *ClusterClient) processTxPipelineNodeConn( return err } - return pipelineReadCmds(rd, trimmedCmds) + return node.Client.pipelineReadCmds(ctx, cn, rd, trimmedCmds) }) } func (c *ClusterClient) txPipelineReadQueued( ctx context.Context, + node *clusterNode, + cn *pool.Conn, rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap, ) error { // Parse queued replies. 
+ // To be sure there are no buffered push notifications, we process them before reading the reply + if err := node.Client.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } if err := statusCmd.readReply(rd); err != nil { return err } for _, cmd := range cmds { - err := statusCmd.readReply(rd) - if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { - continue + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := node.Client.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } + err := statusCmd.readReply(rd) + if err != nil { + if c.checkMovedErr(ctx, cmd, err, failedCmds) { + // will be processed later + continue + } + cmd.SetErr(err) + if !isRedisError(err) { + return err + } } - return err } + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := node.Client.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } // Parse number of replies. 
line, err := rd.ReadLine() if err != nil { @@ -1591,14 +2080,13 @@ func (c *ClusterClient) cmdsMoved( func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { if len(keys) == 0 { - return fmt.Errorf("redis: Watch requires at least one key") + return errNoWatchKeys } slot := hashtag.Slot(keys[0]) for _, key := range keys[1:] { if hashtag.Slot(key) != slot { - err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") - return err + return errWatchCrosslot } } @@ -1649,38 +2137,64 @@ func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...s return err } +// maintenance notifications won't work here for now func (c *ClusterClient) pubSub() *PubSub { var node *clusterNode pubsub := &PubSub{ opt: c.opt.clientOptions(), - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + newConn: func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) { if node != nil { panic("node != nil") } var err error + if len(channels) > 0 { slot := hashtag.Slot(channels[0]) - node, err = c.slotMasterNode(ctx, slot) + + // newConn in PubSub is only used for subscription connections, so it is safe to + // assume that a slave node can always be used when client options specify ReadOnly. 
+ if c.opt.ReadOnly { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + node, err = c.slotReadOnlyNode(state, slot) + if err != nil { + return nil, err + } + } else { + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return nil, err + } + } } else { node, err = c.nodes.Random() + if err != nil { + return nil, err + } } - if err != nil { - return nil, err - } - - cn, err := node.Client.newConn(context.TODO()) + cn, err := node.Client.pubSubPool.NewConn(ctx, node.Client.opt.Network, node.Client.opt.Addr, channels) if err != nil { node = nil - return nil, err } - + // will return nil if already initialized + err = node.Client.initConn(ctx, cn) + if err != nil { + _ = cn.Close() + node = nil + return nil, err + } + node.Client.pubSubPool.TrackConn(cn) return cn, nil }, closeConn: func(cn *pool.Conn) error { - err := node.Client.connPool.CloseConn(cn) + // Untrack connection from PubSubPool + node.Client.pubSubPool.UntrackConn(cn) + err := cn.Close() node = nil return err }, @@ -1741,7 +2255,6 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, for _, idx := range perm { addr := addrs[idx] - node, err := c.nodes.GetOrCreate(addr) if err != nil { if firstErr == nil { @@ -1754,6 +2267,7 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, if err == nil { return info, nil } + if firstErr == nil { firstErr = err } @@ -1765,32 +2279,48 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, return nil, firstErr } +// cmdInfo will fetch and cache the command policies after the first execution func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { - cmdsInfo, err := c.cmdsInfoCache.Get(ctx) + // Use a separate context that won't be canceled to ensure command info lookup + // doesn't fail due to original context cancellation + cmdInfoCtx := c.context(ctx) + if c.opt.ContextTimeoutEnabled && ctx != nil { + // If context timeout 
is enabled, still use a reasonable timeout + var cancel context.CancelFunc + cmdInfoCtx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + } + + cmdsInfo, err := c.cmdsInfoCache.Get(cmdInfoCtx) if err != nil { - internal.Logger.Printf(context.TODO(), "getting command info: %s", err) + internal.Logger.Printf(cmdInfoCtx, "getting command info: %s", err) return nil } info := cmdsInfo[name] if info == nil { - internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name) + internal.Logger.Printf(cmdInfoCtx, "info for cmd=%s not found", name) } + return info } -func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int { +func (c *ClusterClient) cmdSlot(cmd Cmder, prefferedSlot int) int { args := cmd.Args() - if args[0] == "cluster" && args[1] == "getkeysinslot" { + if args[0] == "cluster" && (args[1] == "getkeysinslot" || args[1] == "countkeysinslot") { return args[2].(int) } - return cmdSlot(cmd, cmdFirstKeyPos(cmd)) + return cmdSlot(cmd, cmdFirstKeyPos(cmd), prefferedSlot) } -func cmdSlot(cmd Cmder, pos int) int { +func cmdSlot(cmd Cmder, pos int, prefferedRandomSlot int) int { if pos == 0 { - return hashtag.RandomSlot() + if prefferedRandomSlot != -1 { + return prefferedRandomSlot + } + // Return -1 for keyless commands to signal that ShardPicker should be used + return -1 } firstKey := cmd.stringArg(pos) return hashtag.Slot(firstKey) @@ -1815,6 +2345,36 @@ func (c *ClusterClient) cmdNode( return state.slotMasterNode(slot) } +func (c *ClusterClient) cmdNodeWithShardPicker( + ctx context.Context, + cmdName string, + slot int, + shardPicker routing.ShardPicker, +) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + // For keyless commands (slot == -1), use ShardPicker to select a shard + // This respects the user's configured ShardPicker policy + if slot == -1 { + if len(state.Masters) == 0 { + return nil, errClusterNoNodes + } + idx := 
shardPicker.Next(len(state.Masters)) + return state.Masters[idx], nil + } + + if c.opt.ReadOnly { + cmdInfo := c.cmdInfo(ctx, cmdName) + if cmdInfo != nil && cmdInfo.ReadOnly { + return c.slotReadOnlyNode(state, slot) + } + } + return state.slotMasterNode(slot) +} + func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { if c.opt.RouteByLatency { return state.slotClosestNode(slot) @@ -1822,6 +2382,11 @@ func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*cluste if c.opt.RouteRandomly { return state.slotRandomNode(slot) } + + if c.opt.ShardPicker != nil { + return state.slotShardPickerSlaveNode(slot, c.opt.ShardPicker) + } + return state.slotSlaveNode(slot) } @@ -1859,7 +2424,7 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, if err != nil { return nil, err } - return node.Client, err + return node.Client, nil } func (c *ClusterClient) context(ctx context.Context) context.Context { @@ -1869,26 +2434,38 @@ func (c *ClusterClient) context(ctx context.Context) context.Context { return context.Background() } -func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { - for _, n := range nodes { - if n == node { - return nodes - } - } - return append(nodes, node) +func (c *ClusterClient) GetResolver() *commandInfoResolver { + return c.cmdInfoResolver } -func appendIfNotExists(ss []string, es ...string) []string { -loop: - for _, e := range es { - for _, s := range ss { - if s == e { - continue loop - } - } - ss = append(ss, e) +func (c *ClusterClient) SetCommandInfoResolver(cmdInfoResolver *commandInfoResolver) { + c.cmdInfoResolver = cmdInfoResolver +} + +// extractCommandInfo retrieves the routing policy for a command +func (c *ClusterClient) extractCommandInfo(ctx context.Context, cmd Cmder) *routing.CommandPolicy { + if cmdInfo := c.cmdInfo(ctx, cmd.Name()); cmdInfo != nil && cmdInfo.CommandPolicy != nil { + return cmdInfo.CommandPolicy } - return ss + 
+ return nil +} + +// NewDynamicResolver returns a CommandInfoResolver +// that uses the underlying cmdInfo cache to resolve the policies +func (c *ClusterClient) NewDynamicResolver() *commandInfoResolver { + return &commandInfoResolver{ + resolveFunc: c.extractCommandInfo, + } +} + +func appendIfNotExist[T comparable](vals []T, newVal T) []T { + for _, v := range vals { + if v == newVal { + return vals + } + } + return append(vals, newVal) } //------------------------------------------------------------------------------ diff --git a/vendor/github.com/redis/go-redis/v9/osscluster_router.go b/vendor/github.com/redis/go-redis/v9/osscluster_router.go new file mode 100644 index 00000000..3b001fef --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/osscluster_router.go @@ -0,0 +1,992 @@ +package redis + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + "time" + + "github.com/redis/go-redis/v9/internal/hashtag" + "github.com/redis/go-redis/v9/internal/routing" +) + +var ( + errInvalidCmdPointer = errors.New("redis: invalid command pointer") + errNoCmdsToAggregate = errors.New("redis: no commands to aggregate") + errNoResToAggregate = errors.New("redis: no results to aggregate") + errInvalidCursorCmdArgsCount = errors.New("redis: FT.CURSOR command requires at least 3 arguments") + errInvalidCursorIdType = errors.New("redis: invalid cursor ID type") +) + +// slotResult represents the result of executing a command on a specific slot +type slotResult struct { + cmd Cmder + keys []string + err error +} + +// routeAndRun routes a command to the appropriate cluster nodes and executes it +func (c *ClusterClient) routeAndRun(ctx context.Context, cmd Cmder, node *clusterNode) error { + var policy *routing.CommandPolicy + if c.cmdInfoResolver != nil { + policy = c.cmdInfoResolver.GetCommandPolicy(ctx, cmd) + } + + // Set stepCount from cmdInfo if not already set + if cmd.stepCount() == 0 { + if cmdInfo := c.cmdInfo(ctx, cmd.Name()); cmdInfo != nil && 
cmdInfo.StepCount > 0 { + cmd.SetStepCount(cmdInfo.StepCount) + } + } + + if policy == nil { + return c.executeDefault(ctx, cmd, policy, node) + } + switch policy.Request { + case routing.ReqAllNodes: + return c.executeOnAllNodes(ctx, cmd, policy) + case routing.ReqAllShards: + return c.executeOnAllShards(ctx, cmd, policy) + case routing.ReqMultiShard: + return c.executeMultiShard(ctx, cmd, policy) + case routing.ReqSpecial: + return c.executeSpecialCommand(ctx, cmd, policy, node) + default: + return c.executeDefault(ctx, cmd, policy, node) + } +} + +// executeDefault handles standard command routing based on keys +func (c *ClusterClient) executeDefault(ctx context.Context, cmd Cmder, policy *routing.CommandPolicy, node *clusterNode) error { + if policy != nil && !c.hasKeys(cmd) { + if c.readOnlyEnabled() && policy.IsReadOnly() { + return c.executeOnArbitraryNode(ctx, cmd) + } + } + + return node.Client.Process(ctx, cmd) +} + +// executeOnArbitraryNode routes command to an arbitrary node +func (c *ClusterClient) executeOnArbitraryNode(ctx context.Context, cmd Cmder) error { + node := c.pickArbitraryNode(ctx) + if node == nil { + return errClusterNoNodes + } + return node.Client.Process(ctx, cmd) +} + +// executeOnAllNodes executes command on all nodes (masters and replicas) +func (c *ClusterClient) executeOnAllNodes(ctx context.Context, cmd Cmder, policy *routing.CommandPolicy) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + nodes := append(state.Masters, state.Slaves...) 
+ if len(nodes) == 0 { + return errClusterNoNodes + } + + return c.executeParallel(ctx, cmd, nodes, policy) +} + +// executeOnAllShards executes command on all master shards +func (c *ClusterClient) executeOnAllShards(ctx context.Context, cmd Cmder, policy *routing.CommandPolicy) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + if len(state.Masters) == 0 { + return errClusterNoNodes + } + + return c.executeParallel(ctx, cmd, state.Masters, policy) +} + +// executeMultiShard handles commands that operate on multiple keys across shards +func (c *ClusterClient) executeMultiShard(ctx context.Context, cmd Cmder, policy *routing.CommandPolicy) error { + args := cmd.Args() + firstKeyPos := int(cmdFirstKeyPos(cmd)) + stepCount := int(cmd.stepCount()) + if stepCount == 0 { + stepCount = 1 // Default to 1 if not set + } + + if firstKeyPos == 0 || firstKeyPos >= len(args) { + return fmt.Errorf("redis: multi-shard command %s has no key arguments", cmd.Name()) + } + + // Group keys by slot + slotMap := make(map[int][]string) + keyOrder := make([]string, 0) + + for i := firstKeyPos; i < len(args); i += stepCount { + key, ok := args[i].(string) + if !ok { + return fmt.Errorf("redis: non-string key at position %d: %v", i, args[i]) + } + + slot := hashtag.Slot(key) + slotMap[slot] = append(slotMap[slot], key) + for j := 1; j < stepCount; j++ { + if i+j >= len(args) { + break + } + slotMap[slot] = append(slotMap[slot], args[i+j].(string)) + } + keyOrder = append(keyOrder, key) + } + + return c.executeMultiSlot(ctx, cmd, slotMap, keyOrder, policy) +} + +// executeMultiSlot executes commands across multiple slots concurrently +func (c *ClusterClient) executeMultiSlot(ctx context.Context, cmd Cmder, slotMap map[int][]string, keyOrder []string, policy *routing.CommandPolicy) error { + results := make(chan slotResult, len(slotMap)) + var wg sync.WaitGroup + + // Execute on each slot concurrently + for slot, keys := range slotMap { + wg.Add(1) + go func(slot 
int, keys []string) { + defer wg.Done() + + node, err := c.cmdNodeWithShardPicker(ctx, cmd.Name(), slot, c.opt.ShardPicker) + if err != nil { + results <- slotResult{nil, keys, err} + return + } + + // Create a command for this specific slot's keys + subCmd := c.createSlotSpecificCommand(ctx, cmd, keys) + err = node.Client.Process(ctx, subCmd) + results <- slotResult{subCmd, keys, err} + }(slot, keys) + } + + go func() { + wg.Wait() + close(results) + }() + + return c.aggregateMultiSlotResults(ctx, cmd, results, keyOrder, policy) +} + +// createSlotSpecificCommand creates a new command for a specific slot's keys +func (c *ClusterClient) createSlotSpecificCommand(ctx context.Context, originalCmd Cmder, keys []string) Cmder { + originalArgs := originalCmd.Args() + firstKeyPos := int(cmdFirstKeyPos(originalCmd)) + + // Build new args with only the specified keys + newArgs := make([]interface{}, 0, firstKeyPos+len(keys)) + + // Copy command name and arguments before the keys + newArgs = append(newArgs, originalArgs[:firstKeyPos]...) + + // Add the slot-specific keys + for _, key := range keys { + newArgs = append(newArgs, key) + } + + // Create a new command of the same type using the helper function + return createCommandByType(ctx, originalCmd.GetCmdType(), newArgs...) +} + +// createCommandByType creates a new command of the specified type with the given arguments +func createCommandByType(ctx context.Context, cmdType CmdType, args ...interface{}) Cmder { + switch cmdType { + case CmdTypeString: + return NewStringCmd(ctx, args...) + case CmdTypeInt: + return NewIntCmd(ctx, args...) + case CmdTypeBool: + return NewBoolCmd(ctx, args...) + case CmdTypeFloat: + return NewFloatCmd(ctx, args...) + case CmdTypeStringSlice: + return NewStringSliceCmd(ctx, args...) + case CmdTypeIntSlice: + return NewIntSliceCmd(ctx, args...) + case CmdTypeFloatSlice: + return NewFloatSliceCmd(ctx, args...) + case CmdTypeBoolSlice: + return NewBoolSliceCmd(ctx, args...) 
+ case CmdTypeStatus: + return NewStatusCmd(ctx, args...) + case CmdTypeTime: + return NewTimeCmd(ctx, args...) + case CmdTypeMapStringString: + return NewMapStringStringCmd(ctx, args...) + case CmdTypeMapStringInt: + return NewMapStringIntCmd(ctx, args...) + case CmdTypeMapStringInterface: + return NewMapStringInterfaceCmd(ctx, args...) + case CmdTypeMapStringInterfaceSlice: + return NewMapStringInterfaceSliceCmd(ctx, args...) + case CmdTypeSlice: + return NewSliceCmd(ctx, args...) + case CmdTypeStringStructMap: + return NewStringStructMapCmd(ctx, args...) + case CmdTypeXMessageSlice: + return NewXMessageSliceCmd(ctx, args...) + case CmdTypeXStreamSlice: + return NewXStreamSliceCmd(ctx, args...) + case CmdTypeXPending: + return NewXPendingCmd(ctx, args...) + case CmdTypeXPendingExt: + return NewXPendingExtCmd(ctx, args...) + case CmdTypeXAutoClaim: + return NewXAutoClaimCmd(ctx, args...) + case CmdTypeXAutoClaimJustID: + return NewXAutoClaimJustIDCmd(ctx, args...) + case CmdTypeXInfoStreamFull: + return NewXInfoStreamFullCmd(ctx, args...) + case CmdTypeZSlice: + return NewZSliceCmd(ctx, args...) + case CmdTypeZWithKey: + return NewZWithKeyCmd(ctx, args...) + case CmdTypeClusterSlots: + return NewClusterSlotsCmd(ctx, args...) + case CmdTypeGeoPos: + return NewGeoPosCmd(ctx, args...) + case CmdTypeCommandsInfo: + return NewCommandsInfoCmd(ctx, args...) + case CmdTypeSlowLog: + return NewSlowLogCmd(ctx, args...) + case CmdTypeKeyValues: + return NewKeyValuesCmd(ctx, args...) + case CmdTypeZSliceWithKey: + return NewZSliceWithKeyCmd(ctx, args...) + case CmdTypeFunctionList: + return NewFunctionListCmd(ctx, args...) + case CmdTypeFunctionStats: + return NewFunctionStatsCmd(ctx, args...) + case CmdTypeKeyFlags: + return NewKeyFlagsCmd(ctx, args...) + case CmdTypeDuration: + return NewDurationCmd(ctx, time.Millisecond, args...) + } + return NewCmd(ctx, args...) 
+} + +// executeSpecialCommand handles commands with special routing requirements +func (c *ClusterClient) executeSpecialCommand(ctx context.Context, cmd Cmder, policy *routing.CommandPolicy, node *clusterNode) error { + switch cmd.Name() { + case "ft.cursor": + return c.executeCursorCommand(ctx, cmd) + default: + return c.executeDefault(ctx, cmd, policy, node) + } +} + +// executeCursorCommand handles FT.CURSOR commands with sticky routing +func (c *ClusterClient) executeCursorCommand(ctx context.Context, cmd Cmder) error { + args := cmd.Args() + if len(args) < 4 { + return errInvalidCursorCmdArgsCount + } + + cursorID, ok := args[3].(string) + if !ok { + return errInvalidCursorIdType + } + + // Route based on cursor ID to maintain stickiness + slot := hashtag.Slot(cursorID) + node, err := c.cmdNodeWithShardPicker(ctx, cmd.Name(), slot, c.opt.ShardPicker) + if err != nil { + return err + } + + return node.Client.Process(ctx, cmd) +} + +// executeParallel executes a command on multiple nodes concurrently +func (c *ClusterClient) executeParallel(ctx context.Context, cmd Cmder, nodes []*clusterNode, policy *routing.CommandPolicy) error { + if len(nodes) == 0 { + return errClusterNoNodes + } + + if len(nodes) == 1 { + return nodes[0].Client.Process(ctx, cmd) + } + + type nodeResult struct { + cmd Cmder + err error + } + + results := make(chan nodeResult, len(nodes)) + var wg sync.WaitGroup + + for _, node := range nodes { + wg.Add(1) + go func(n *clusterNode) { + defer wg.Done() + cmdCopy := cmd.Clone() + err := n.Client.Process(ctx, cmdCopy) + results <- nodeResult{cmdCopy, err} + }(node) + } + + go func() { + wg.Wait() + close(results) + }() + + // Collect results and check for errors + cmds := make([]Cmder, 0, len(nodes)) + var firstErr error + + for result := range results { + if result.err != nil && firstErr == nil { + firstErr = result.err + } + cmds = append(cmds, result.cmd) + } + + // If there was an error and no policy specified, fail fast + if firstErr != 
nil && (policy == nil || policy.Response == routing.RespDefaultKeyless) { + cmd.SetErr(firstErr) + return firstErr + } + + return c.aggregateResponses(cmd, cmds, policy) +} + +// aggregateMultiSlotResults aggregates results from multi-slot execution +func (c *ClusterClient) aggregateMultiSlotResults(ctx context.Context, cmd Cmder, results <-chan slotResult, keyOrder []string, policy *routing.CommandPolicy) error { + keyedResults := make(map[string]routing.AggregatorResErr) + var firstErr error + + for result := range results { + if result.err != nil && firstErr == nil { + firstErr = result.err + } + if result.cmd != nil && result.err == nil { + value, err := ExtractCommandValue(result.cmd) + + // Check if the result is a slice (e.g., from MGET) + if sliceValue, ok := value.([]interface{}); ok { + // Map each element to its corresponding key + for i, key := range result.keys { + if i < len(sliceValue) { + keyedResults[key] = routing.AggregatorResErr{Result: sliceValue[i], Err: err} + } else { + keyedResults[key] = routing.AggregatorResErr{Result: nil, Err: err} + } + } + } else { + // For non-slice results, map the entire result to each key + for _, key := range result.keys { + keyedResults[key] = routing.AggregatorResErr{Result: value, Err: err} + } + } + } + + // TODO: return multiple errors by order when we will implement multiple errors returning + if result.err != nil { + firstErr = result.err + } + } + + return c.aggregateKeyedValues(cmd, keyedResults, keyOrder, policy) +} + +// aggregateKeyedValues aggregates individual key-value pairs while preserving key order +func (c *ClusterClient) aggregateKeyedValues(cmd Cmder, keyedResults map[string]routing.AggregatorResErr, keyOrder []string, policy *routing.CommandPolicy) error { + if len(keyedResults) == 0 { + return errNoResToAggregate + } + + aggregator := c.createAggregator(policy, cmd, true) + + // Set key order for keyed aggregators + var keyedAgg *routing.DefaultKeyedAggregator + var isKeyedAgg bool + var 
err error + if keyedAgg, isKeyedAgg = aggregator.(*routing.DefaultKeyedAggregator); isKeyedAgg { + err = keyedAgg.BatchAddWithKeyOrder(keyedResults, keyOrder) + } else { + err = aggregator.BatchAdd(keyedResults) + } + + if err != nil { + return err + } + + return c.finishAggregation(cmd, aggregator) +} + +// aggregateResponses aggregates multiple shard responses +func (c *ClusterClient) aggregateResponses(cmd Cmder, cmds []Cmder, policy *routing.CommandPolicy) error { + if len(cmds) == 0 { + return errNoCmdsToAggregate + } + + if len(cmds) == 1 { + shardCmd := cmds[0] + if err := shardCmd.Err(); err != nil { + cmd.SetErr(err) + return err + } + value, _ := ExtractCommandValue(shardCmd) + return c.setCommandValue(cmd, value) + } + + aggregator := c.createAggregator(policy, cmd, false) + + batchWithErrs := []routing.AggregatorResErr{} + // Add all results to aggregator + for _, shardCmd := range cmds { + value, err := ExtractCommandValue(shardCmd) + batchWithErrs = append(batchWithErrs, routing.AggregatorResErr{ + Result: value, + Err: err, + }) + } + + err := aggregator.BatchSlice(batchWithErrs) + if err != nil { + return err + } + + return c.finishAggregation(cmd, aggregator) +} + +// createAggregator creates the appropriate response aggregator +func (c *ClusterClient) createAggregator(policy *routing.CommandPolicy, cmd Cmder, isKeyed bool) routing.ResponseAggregator { + if policy != nil { + return routing.NewResponseAggregator(policy.Response, cmd.Name()) + } + + if !isKeyed { + firstKeyPos := cmdFirstKeyPos(cmd) + isKeyed = firstKeyPos > 0 + } + + return routing.NewDefaultAggregator(isKeyed) +} + +// finishAggregation completes the aggregation process and sets the result +func (c *ClusterClient) finishAggregation(cmd Cmder, aggregator routing.ResponseAggregator) error { + finalValue, finalErr := aggregator.Result() + if finalErr != nil { + cmd.SetErr(finalErr) + return finalErr + } + + return c.setCommandValue(cmd, finalValue) +} + +// pickArbitraryNode selects a 
master or slave shard using the configured ShardPicker +func (c *ClusterClient) pickArbitraryNode(ctx context.Context) *clusterNode { + state, err := c.state.Get(ctx) + if err != nil || len(state.Masters) == 0 { + return nil + } + + allNodes := append(state.Masters, state.Slaves...) + + idx := c.opt.ShardPicker.Next(len(allNodes)) + return allNodes[idx] +} + +// hasKeys checks if a command operates on keys +func (c *ClusterClient) hasKeys(cmd Cmder) bool { + firstKeyPos := cmdFirstKeyPos(cmd) + return firstKeyPos > 0 +} + +func (c *ClusterClient) readOnlyEnabled() bool { + return c.opt.ReadOnly +} + +// setCommandValue sets the aggregated value on a command using the enum-based approach +func (c *ClusterClient) setCommandValue(cmd Cmder, value interface{}) error { + // If value is nil, it might mean ExtractCommandValue couldn't extract the value + // but the command might have executed successfully. In this case, don't set an error. + if value == nil { + // ExtractCommandValue returned nil - this means the command type is not supported + // in the aggregation flow. This is a programming error, not a runtime error. 
+ if cmd.Err() != nil { + // Command already has an error, preserve it + return cmd.Err() + } + // Command executed successfully but we can't extract/set the aggregated value + // This indicates the command type needs to be added to ExtractCommandValue + return fmt.Errorf("redis: cannot aggregate command %s: unsupported command type %d", + cmd.Name(), cmd.GetCmdType()) + } + + switch cmd.GetCmdType() { + case CmdTypeGeneric: + if c, ok := cmd.(*Cmd); ok { + c.SetVal(value) + } + case CmdTypeString: + if c, ok := cmd.(*StringCmd); ok { + if v, ok := value.(string); ok { + c.SetVal(v) + } + } + case CmdTypeInt: + if c, ok := cmd.(*IntCmd); ok { + if v, ok := value.(int64); ok { + c.SetVal(v) + } else if v, ok := value.(float64); ok { + c.SetVal(int64(v)) + } + } + case CmdTypeBool: + if c, ok := cmd.(*BoolCmd); ok { + if v, ok := value.(bool); ok { + c.SetVal(v) + } + } + case CmdTypeFloat: + if c, ok := cmd.(*FloatCmd); ok { + if v, ok := value.(float64); ok { + c.SetVal(v) + } + } + case CmdTypeStringSlice: + if c, ok := cmd.(*StringSliceCmd); ok { + if v, ok := value.([]string); ok { + c.SetVal(v) + } + } + case CmdTypeIntSlice: + if c, ok := cmd.(*IntSliceCmd); ok { + if v, ok := value.([]int64); ok { + c.SetVal(v) + } else if v, ok := value.([]float64); ok { + els := len(v) + intSlc := make([]int, els) + for i := range v { + intSlc[i] = int(v[i]) + } + } + } + case CmdTypeFloatSlice: + if c, ok := cmd.(*FloatSliceCmd); ok { + if v, ok := value.([]float64); ok { + c.SetVal(v) + } + } + case CmdTypeBoolSlice: + if c, ok := cmd.(*BoolSliceCmd); ok { + if v, ok := value.([]bool); ok { + c.SetVal(v) + } + } + case CmdTypeMapStringString: + if c, ok := cmd.(*MapStringStringCmd); ok { + if v, ok := value.(map[string]string); ok { + c.SetVal(v) + } + } + case CmdTypeMapStringInt: + if c, ok := cmd.(*MapStringIntCmd); ok { + if v, ok := value.(map[string]int64); ok { + c.SetVal(v) + } + } + case CmdTypeMapStringInterface: + if c, ok := cmd.(*MapStringInterfaceCmd); ok { 
+ if v, ok := value.(map[string]interface{}); ok { + c.SetVal(v) + } + } + case CmdTypeSlice: + if c, ok := cmd.(*SliceCmd); ok { + if v, ok := value.([]interface{}); ok { + c.SetVal(v) + } + } + case CmdTypeStatus: + if c, ok := cmd.(*StatusCmd); ok { + if v, ok := value.(string); ok { + c.SetVal(v) + } + } + case CmdTypeDuration: + if c, ok := cmd.(*DurationCmd); ok { + if v, ok := value.(time.Duration); ok { + c.SetVal(v) + } + } + case CmdTypeTime: + if c, ok := cmd.(*TimeCmd); ok { + if v, ok := value.(time.Time); ok { + c.SetVal(v) + } + } + case CmdTypeKeyValueSlice: + if c, ok := cmd.(*KeyValueSliceCmd); ok { + if v, ok := value.([]KeyValue); ok { + c.SetVal(v) + } + } + case CmdTypeStringStructMap: + if c, ok := cmd.(*StringStructMapCmd); ok { + if v, ok := value.(map[string]struct{}); ok { + c.SetVal(v) + } + } + case CmdTypeXMessageSlice: + if c, ok := cmd.(*XMessageSliceCmd); ok { + if v, ok := value.([]XMessage); ok { + c.SetVal(v) + } + } + case CmdTypeXStreamSlice: + if c, ok := cmd.(*XStreamSliceCmd); ok { + if v, ok := value.([]XStream); ok { + c.SetVal(v) + } + } + case CmdTypeXPending: + if c, ok := cmd.(*XPendingCmd); ok { + if v, ok := value.(*XPending); ok { + c.SetVal(v) + } + } + case CmdTypeXPendingExt: + if c, ok := cmd.(*XPendingExtCmd); ok { + if v, ok := value.([]XPendingExt); ok { + c.SetVal(v) + } + } + case CmdTypeXAutoClaim: + if c, ok := cmd.(*XAutoClaimCmd); ok { + if v, ok := value.(CmdTypeXAutoClaimValue); ok { + c.SetVal(v.messages, v.start) + } + } + case CmdTypeXAutoClaimJustID: + if c, ok := cmd.(*XAutoClaimJustIDCmd); ok { + if v, ok := value.(CmdTypeXAutoClaimJustIDValue); ok { + c.SetVal(v.ids, v.start) + } + } + case CmdTypeXInfoConsumers: + if c, ok := cmd.(*XInfoConsumersCmd); ok { + if v, ok := value.([]XInfoConsumer); ok { + c.SetVal(v) + } + } + case CmdTypeXInfoGroups: + if c, ok := cmd.(*XInfoGroupsCmd); ok { + if v, ok := value.([]XInfoGroup); ok { + c.SetVal(v) + } + } + case CmdTypeXInfoStream: + if c, ok := 
cmd.(*XInfoStreamCmd); ok { + if v, ok := value.(*XInfoStream); ok { + c.SetVal(v) + } + } + case CmdTypeXInfoStreamFull: + if c, ok := cmd.(*XInfoStreamFullCmd); ok { + if v, ok := value.(*XInfoStreamFull); ok { + c.SetVal(v) + } + } + case CmdTypeZSlice: + if c, ok := cmd.(*ZSliceCmd); ok { + if v, ok := value.([]Z); ok { + c.SetVal(v) + } + } + case CmdTypeZWithKey: + if c, ok := cmd.(*ZWithKeyCmd); ok { + if v, ok := value.(*ZWithKey); ok { + c.SetVal(v) + } + } + case CmdTypeScan: + if c, ok := cmd.(*ScanCmd); ok { + if v, ok := value.(CmdTypeScanValue); ok { + c.SetVal(v.keys, v.cursor) + } + } + case CmdTypeClusterSlots: + if c, ok := cmd.(*ClusterSlotsCmd); ok { + if v, ok := value.([]ClusterSlot); ok { + c.SetVal(v) + } + } + case CmdTypeGeoLocation: + if c, ok := cmd.(*GeoLocationCmd); ok { + if v, ok := value.([]GeoLocation); ok { + c.SetVal(v) + } + } + case CmdTypeGeoSearchLocation: + if c, ok := cmd.(*GeoSearchLocationCmd); ok { + if v, ok := value.([]GeoLocation); ok { + c.SetVal(v) + } + } + case CmdTypeGeoPos: + if c, ok := cmd.(*GeoPosCmd); ok { + if v, ok := value.([]*GeoPos); ok { + c.SetVal(v) + } + } + case CmdTypeCommandsInfo: + if c, ok := cmd.(*CommandsInfoCmd); ok { + if v, ok := value.(map[string]*CommandInfo); ok { + c.SetVal(v) + } + } + case CmdTypeSlowLog: + if c, ok := cmd.(*SlowLogCmd); ok { + if v, ok := value.([]SlowLog); ok { + c.SetVal(v) + } + } + case CmdTypeMapStringStringSlice: + if c, ok := cmd.(*MapStringStringSliceCmd); ok { + if v, ok := value.([]map[string]string); ok { + c.SetVal(v) + } + } + case CmdTypeMapMapStringInterface: + if c, ok := cmd.(*MapMapStringInterfaceCmd); ok { + if v, ok := value.(map[string]interface{}); ok { + c.SetVal(v) + } + } + case CmdTypeMapStringInterfaceSlice: + if c, ok := cmd.(*MapStringInterfaceSliceCmd); ok { + if v, ok := value.([]map[string]interface{}); ok { + c.SetVal(v) + } + } + case CmdTypeKeyValues: + if c, ok := cmd.(*KeyValuesCmd); ok { + // KeyValuesCmd needs a key string and 
values slice + if v, ok := value.(CmdTypeKeyValuesValue); ok { + c.SetVal(v.key, v.values) + } + } + case CmdTypeZSliceWithKey: + if c, ok := cmd.(*ZSliceWithKeyCmd); ok { + // ZSliceWithKeyCmd needs a key string and Z slice + if v, ok := value.(CmdTypeZSliceWithKeyValue); ok { + c.SetVal(v.key, v.zSlice) + } + } + case CmdTypeFunctionList: + if c, ok := cmd.(*FunctionListCmd); ok { + if v, ok := value.([]Library); ok { + c.SetVal(v) + } + } + case CmdTypeFunctionStats: + if c, ok := cmd.(*FunctionStatsCmd); ok { + if v, ok := value.(FunctionStats); ok { + c.SetVal(v) + } + } + case CmdTypeLCS: + if c, ok := cmd.(*LCSCmd); ok { + if v, ok := value.(*LCSMatch); ok { + c.SetVal(v) + } + } + case CmdTypeKeyFlags: + if c, ok := cmd.(*KeyFlagsCmd); ok { + if v, ok := value.([]KeyFlags); ok { + c.SetVal(v) + } + } + case CmdTypeClusterLinks: + if c, ok := cmd.(*ClusterLinksCmd); ok { + if v, ok := value.([]ClusterLink); ok { + c.SetVal(v) + } + } + case CmdTypeClusterShards: + if c, ok := cmd.(*ClusterShardsCmd); ok { + if v, ok := value.([]ClusterShard); ok { + c.SetVal(v) + } + } + case CmdTypeRankWithScore: + if c, ok := cmd.(*RankWithScoreCmd); ok { + if v, ok := value.(RankScore); ok { + c.SetVal(v) + } + } + case CmdTypeClientInfo: + if c, ok := cmd.(*ClientInfoCmd); ok { + if v, ok := value.(*ClientInfo); ok { + c.SetVal(v) + } + } + case CmdTypeACLLog: + if c, ok := cmd.(*ACLLogCmd); ok { + if v, ok := value.([]*ACLLogEntry); ok { + c.SetVal(v) + } + } + case CmdTypeInfo: + if c, ok := cmd.(*InfoCmd); ok { + if v, ok := value.(map[string]map[string]string); ok { + c.SetVal(v) + } + } + case CmdTypeMonitor: + // MonitorCmd doesn't have SetVal method + // Skip setting value for MonitorCmd + case CmdTypeJSON: + if c, ok := cmd.(*JSONCmd); ok { + if v, ok := value.(string); ok { + c.SetVal(v) + } + } + case CmdTypeJSONSlice: + if c, ok := cmd.(*JSONSliceCmd); ok { + if v, ok := value.([]interface{}); ok { + c.SetVal(v) + } + } + case CmdTypeIntPointerSlice: + if c, 
ok := cmd.(*IntPointerSliceCmd); ok { + if v, ok := value.([]*int64); ok { + c.SetVal(v) + } + } + case CmdTypeScanDump: + if c, ok := cmd.(*ScanDumpCmd); ok { + if v, ok := value.(ScanDump); ok { + c.SetVal(v) + } + } + case CmdTypeBFInfo: + if c, ok := cmd.(*BFInfoCmd); ok { + if v, ok := value.(BFInfo); ok { + c.SetVal(v) + } + } + case CmdTypeCFInfo: + if c, ok := cmd.(*CFInfoCmd); ok { + if v, ok := value.(CFInfo); ok { + c.SetVal(v) + } + } + case CmdTypeCMSInfo: + if c, ok := cmd.(*CMSInfoCmd); ok { + if v, ok := value.(CMSInfo); ok { + c.SetVal(v) + } + } + case CmdTypeTopKInfo: + if c, ok := cmd.(*TopKInfoCmd); ok { + if v, ok := value.(TopKInfo); ok { + c.SetVal(v) + } + } + case CmdTypeTDigestInfo: + if c, ok := cmd.(*TDigestInfoCmd); ok { + if v, ok := value.(TDigestInfo); ok { + c.SetVal(v) + } + } + case CmdTypeFTSynDump: + if c, ok := cmd.(*FTSynDumpCmd); ok { + if v, ok := value.([]FTSynDumpResult); ok { + c.SetVal(v) + } + } + case CmdTypeAggregate: + if c, ok := cmd.(*AggregateCmd); ok { + if v, ok := value.(*FTAggregateResult); ok { + c.SetVal(v) + } + } + case CmdTypeFTInfo: + if c, ok := cmd.(*FTInfoCmd); ok { + if v, ok := value.(FTInfoResult); ok { + c.SetVal(v) + } + } + case CmdTypeFTSpellCheck: + if c, ok := cmd.(*FTSpellCheckCmd); ok { + if v, ok := value.([]SpellCheckResult); ok { + c.SetVal(v) + } + } + case CmdTypeFTSearch: + if c, ok := cmd.(*FTSearchCmd); ok { + if v, ok := value.(FTSearchResult); ok { + c.SetVal(v) + } + } + case CmdTypeTSTimestampValue: + if c, ok := cmd.(*TSTimestampValueCmd); ok { + if v, ok := value.(TSTimestampValue); ok { + c.SetVal(v) + } + } + case CmdTypeTSTimestampValueSlice: + if c, ok := cmd.(*TSTimestampValueSliceCmd); ok { + if v, ok := value.([]TSTimestampValue); ok { + c.SetVal(v) + } + } + default: + // Fallback to reflection for unknown types + return c.setCommandValueReflection(cmd, value) + } + + return nil +} + +// setCommandValueReflection is a fallback function that uses reflection +func (c 
*ClusterClient) setCommandValueReflection(cmd Cmder, value interface{}) error { + cmdValue := reflect.ValueOf(cmd) + if cmdValue.Kind() != reflect.Ptr || cmdValue.IsNil() { + return errInvalidCmdPointer + } + + setValMethod := cmdValue.MethodByName("SetVal") + if !setValMethod.IsValid() { + return fmt.Errorf("redis: command %T does not have SetVal method", cmd) + } + + args := []reflect.Value{reflect.ValueOf(value)} + + switch cmd.(type) { + case *XAutoClaimCmd, *XAutoClaimJustIDCmd: + args = append(args, reflect.ValueOf("")) + case *ScanCmd: + args = append(args, reflect.ValueOf(uint64(0))) + case *KeyValuesCmd, *ZSliceWithKeyCmd: + if key, ok := value.(string); ok { + args = []reflect.Value{reflect.ValueOf(key)} + if _, ok := cmd.(*ZSliceWithKeyCmd); ok { + args = append(args, reflect.ValueOf([]Z{})) + } else { + args = append(args, reflect.ValueOf([]string{})) + } + } + } + + defer func() { + if r := recover(); r != nil { + cmd.SetErr(fmt.Errorf("redis: failed to set command value: %v", r)) + } + }() + + setValMethod.Call(args) + return nil +} diff --git a/vendor/github.com/redis/go-redis/v9/otel.go b/vendor/github.com/redis/go-redis/v9/otel.go new file mode 100644 index 00000000..a81377d4 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/otel.go @@ -0,0 +1,204 @@ +package redis + +import ( + "context" + "net" + "time" + + "github.com/redis/go-redis/v9/internal/otel" + "github.com/redis/go-redis/v9/internal/pool" +) + +// ConnInfo provides information about a Redis connection for metrics. +type ConnInfo interface { + RemoteAddr() net.Addr + PoolName() string +} + +type Pooler interface { + PoolStats() *pool.Stats +} + +type PubSubPooler interface { + Stats() *pool.PubSubStats +} + +// OTelRecorder is the interface for recording OpenTelemetry metrics. 
+ +type OTelRecorder interface { + // RecordOperationDuration records the total operation duration (including all retries) + RecordOperationDuration(ctx context.Context, duration time.Duration, cmd Cmder, attempts int, err error, cn ConnInfo, dbIndex int) + + // RecordPipelineOperationDuration records the total pipeline/transaction duration. + // operationName should be "PIPELINE" for regular pipelines or "MULTI" for transactions. + RecordPipelineOperationDuration(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn ConnInfo, dbIndex int) + + // RecordConnectionCreateTime records the time it took to create a new connection + RecordConnectionCreateTime(ctx context.Context, duration time.Duration, cn ConnInfo) + + // RecordConnectionRelaxedTimeout records when connection timeout is relaxed/unrelaxed + // delta: +1 for relaxed, -1 for unrelaxed + // poolName: name of the connection pool (e.g., "main", "pubsub") + // notificationType: the notification type that triggered the timeout relaxation (e.g., "MOVING", "HANDOFF") + RecordConnectionRelaxedTimeout(ctx context.Context, delta int, cn ConnInfo, poolName, notificationType string) + + // RecordConnectionHandoff records when a connection is handed off to another node + // poolName: name of the connection pool (e.g., "main", "pubsub") + RecordConnectionHandoff(ctx context.Context, cn ConnInfo, poolName string) + + // RecordError records client errors (ASK, MOVED, handshake failures, etc.) 
+ // errorType: type of error (e.g., "ASK", "MOVED", "HANDSHAKE_FAILED") + // statusCode: Redis response status code if available (e.g., "MOVED", "ASK") + // isInternal: whether this is an internal error + // retryAttempts: number of retry attempts made + RecordError(ctx context.Context, errorType string, cn ConnInfo, statusCode string, isInternal bool, retryAttempts int) + + // RecordMaintenanceNotification records when a maintenance notification is received + // notificationType: the type of notification (e.g., "MOVING", "MIGRATING", etc.) + RecordMaintenanceNotification(ctx context.Context, cn ConnInfo, notificationType string) + + // RecordConnectionWaitTime records the time spent waiting for a connection from the pool + RecordConnectionWaitTime(ctx context.Context, duration time.Duration, cn ConnInfo) + + // RecordConnectionClosed records when a connection is closed + // reason: reason for closing (e.g., "idle", "max_lifetime", "error", "pool_closed") + // err: the error that caused the close (nil for non-error closures) + RecordConnectionClosed(ctx context.Context, cn ConnInfo, reason string, err error) + + // RecordPubSubMessage records a Pub/Sub message + // direction: "sent" or "received" + // channel: channel name (may be hidden for cardinality reduction) + // sharded: true for sharded pub/sub (SPUBLISH/SSUBSCRIBE) + RecordPubSubMessage(ctx context.Context, cn ConnInfo, direction, channel string, sharded bool) + + // RecordStreamLag records the lag for stream consumer group processing + // lag: time difference between message creation and consumption + // streamName: name of the stream (may be hidden for cardinality reduction) + // consumerGroup: name of the consumer group + // consumerName: name of the consumer + RecordStreamLag(ctx context.Context, lag time.Duration, cn ConnInfo, streamName, consumerGroup, consumerName string) +} + +// This is used for async gauge metrics that need to pull stats from pools periodically. 
+type OTelPoolRegistrar interface { + // RegisterPool is called when a new client is created with its main connection pool. + // poolName: unique identifier for the pool (e.g., "main_abc123") + RegisterPool(poolName string, pool Pooler) + // UnregisterPool is called when a client is closed to remove its pool from the registry. + UnregisterPool(pool Pooler) + // RegisterPubSubPool is called when a new client is created with a PubSub pool. + // poolName: unique identifier for the pool (e.g., "main_abc123_pubsub") + RegisterPubSubPool(poolName string, pool PubSubPooler) + // UnregisterPubSubPool is called when a PubSub client is closed to remove its pool. + UnregisterPubSubPool(pool PubSubPooler) +} + +// SetOTelRecorder sets the global OpenTelemetry recorder. +func SetOTelRecorder(r OTelRecorder) { + if r == nil { + otel.SetGlobalRecorder(nil) + return + } + otel.SetGlobalRecorder(&otelRecorderAdapter{r}) +} + +type otelRecorderAdapter struct { + recorder OTelRecorder +} + +// toConnInfo converts *pool.Conn to ConnInfo interface properly. +// This ensures that a nil *pool.Conn becomes a true nil interface, +// not a non-nil interface containing a nil pointer. 
+func toConnInfo(cn *pool.Conn) ConnInfo { + if cn == nil { + return nil + } + return cn +} + +func (a *otelRecorderAdapter) RecordOperationDuration(ctx context.Context, duration time.Duration, cmd otel.Cmder, attempts int, err error, cn *pool.Conn, dbIndex int) { + // Convert internal Cmder to public Cmder + if publicCmd, ok := cmd.(Cmder); ok { + a.recorder.RecordOperationDuration(ctx, duration, publicCmd, attempts, err, toConnInfo(cn), dbIndex) + } +} + +func (a *otelRecorderAdapter) RecordPipelineOperationDuration(ctx context.Context, duration time.Duration, operationName string, cmdCount int, attempts int, err error, cn *pool.Conn, dbIndex int) { + a.recorder.RecordPipelineOperationDuration(ctx, duration, operationName, cmdCount, attempts, err, toConnInfo(cn), dbIndex) +} + +func (a *otelRecorderAdapter) RecordConnectionCreateTime(ctx context.Context, duration time.Duration, cn *pool.Conn) { + a.recorder.RecordConnectionCreateTime(ctx, duration, toConnInfo(cn)) +} + +func (a *otelRecorderAdapter) RecordConnectionRelaxedTimeout(ctx context.Context, delta int, cn *pool.Conn, poolName, notificationType string) { + a.recorder.RecordConnectionRelaxedTimeout(ctx, delta, toConnInfo(cn), poolName, notificationType) +} + +func (a *otelRecorderAdapter) RecordConnectionHandoff(ctx context.Context, cn *pool.Conn, poolName string) { + a.recorder.RecordConnectionHandoff(ctx, toConnInfo(cn), poolName) +} + +func (a *otelRecorderAdapter) RecordError(ctx context.Context, errorType string, cn *pool.Conn, statusCode string, isInternal bool, retryAttempts int) { + a.recorder.RecordError(ctx, errorType, toConnInfo(cn), statusCode, isInternal, retryAttempts) +} + +func (a *otelRecorderAdapter) RecordMaintenanceNotification(ctx context.Context, cn *pool.Conn, notificationType string) { + a.recorder.RecordMaintenanceNotification(ctx, toConnInfo(cn), notificationType) +} + +func (a *otelRecorderAdapter) RecordConnectionWaitTime(ctx context.Context, duration time.Duration, cn 
*pool.Conn) { + a.recorder.RecordConnectionWaitTime(ctx, duration, toConnInfo(cn)) +} + +func (a *otelRecorderAdapter) RecordConnectionClosed(ctx context.Context, cn *pool.Conn, reason string, err error) { + a.recorder.RecordConnectionClosed(ctx, toConnInfo(cn), reason, err) +} + +func (a *otelRecorderAdapter) RecordPubSubMessage(ctx context.Context, cn *pool.Conn, direction, channel string, sharded bool) { + a.recorder.RecordPubSubMessage(ctx, toConnInfo(cn), direction, channel, sharded) +} + +func (a *otelRecorderAdapter) RecordStreamLag(ctx context.Context, lag time.Duration, cn *pool.Conn, streamName, consumerGroup, consumerName string) { + a.recorder.RecordStreamLag(ctx, lag, toConnInfo(cn), streamName, consumerGroup, consumerName) +} + +func (a *otelRecorderAdapter) RegisterPool(poolName string, p pool.Pooler) { + if registrar, ok := a.recorder.(OTelPoolRegistrar); ok { + registrar.RegisterPool(poolName, &poolerAdapter{p}) + } +} + +func (a *otelRecorderAdapter) UnregisterPool(p pool.Pooler) { + if registrar, ok := a.recorder.(OTelPoolRegistrar); ok { + registrar.UnregisterPool(&poolerAdapter{p}) + } +} + +func (a *otelRecorderAdapter) RegisterPubSubPool(poolName string, p otel.PubSubPooler) { + if registrar, ok := a.recorder.(OTelPoolRegistrar); ok { + registrar.RegisterPubSubPool(poolName, &pubSubPoolerAdapter{p}) + } +} + +func (a *otelRecorderAdapter) UnregisterPubSubPool(p otel.PubSubPooler) { + if registrar, ok := a.recorder.(OTelPoolRegistrar); ok { + registrar.UnregisterPubSubPool(&pubSubPoolerAdapter{p}) + } +} + +type poolerAdapter struct { + p pool.Pooler +} + +func (a *poolerAdapter) PoolStats() *pool.Stats { + return a.p.Stats() +} + +type pubSubPoolerAdapter struct { + p otel.PubSubPooler +} + +func (a *pubSubPoolerAdapter) Stats() *pool.PubSubStats { + return a.p.Stats() +} diff --git a/vendor/github.com/redis/go-redis/v9/package.json b/vendor/github.com/redis/go-redis/v9/package.json deleted file mode 100644 index 1a690047..00000000 --- 
a/vendor/github.com/redis/go-redis/v9/package.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "name": "redis", - "version": "9.4.0", - "main": "index.js", - "repository": "git@github.com:redis/go-redis.git", - "author": "Vladimir Mihailenco ", - "license": "BSD-2-clause" -} diff --git a/vendor/github.com/redis/go-redis/v9/pipeline.go b/vendor/github.com/redis/go-redis/v9/pipeline.go index 1c114205..567bf121 100644 --- a/vendor/github.com/redis/go-redis/v9/pipeline.go +++ b/vendor/github.com/redis/go-redis/v9/pipeline.go @@ -7,7 +7,7 @@ import ( type pipelineExecer func(context.Context, []Cmder) error -// Pipeliner is an mechanism to realise Redis Pipeline technique. +// Pipeliner is a mechanism to realise Redis Pipeline technique. // // Pipelining is a technique to extremely speed up processing by packing // operations to batches, send them at once to Redis and read a replies in a @@ -23,21 +23,27 @@ type pipelineExecer func(context.Context, []Cmder) error type Pipeliner interface { StatefulCmdable - // Len is to obtain the number of commands in the pipeline that have not yet been executed. + // Len obtains the number of commands in the pipeline that have not yet been executed. Len() int // Do is an API for executing any command. // If a certain Redis command is not yet supported, you can use Do to execute it. Do(ctx context.Context, args ...interface{}) *Cmd - // Process is to put the commands to be executed into the pipeline buffer. + // Process queues the cmd for later execution. Process(ctx context.Context, cmd Cmder) error - // Discard is to discard all commands in the cache that have not yet been executed. + // BatchProcess adds multiple commands to be executed into the pipeline buffer. + BatchProcess(ctx context.Context, cmd ...Cmder) error + + // Discard discards all commands in the pipeline buffer that have not yet been executed. Discard() - // Exec is to send all the commands buffered in the pipeline to the redis-server. 
+ // Exec sends all the commands buffered in the pipeline to the redis server. Exec(ctx context.Context) ([]Cmder, error) + + // Cmds returns the list of queued commands. + Cmds() []Cmder } var _ Pipeliner = (*Pipeline)(nil) @@ -76,7 +82,12 @@ func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd { // Process queues the cmd for later execution. func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error { - c.cmds = append(c.cmds, cmd) + return c.BatchProcess(ctx, cmd) +} + +// BatchProcess queues multiple cmds for later execution. +func (c *Pipeline) BatchProcess(ctx context.Context, cmd ...Cmder) error { + c.cmds = append(c.cmds, cmd...) return nil } @@ -119,3 +130,7 @@ func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([ func (c *Pipeline) TxPipeline() Pipeliner { return c } + +func (c *Pipeline) Cmds() []Cmder { + return c.cmds +} diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go index 5d5cd1a6..ee67911e 100644 --- a/vendor/github.com/redis/go-redis/v9/probabilistic.go +++ b/vendor/github.com/redis/go-redis/v9/probabilistic.go @@ -225,8 +225,9 @@ type ScanDumpCmd struct { func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd { return &ScanDumpCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeScanDump, }, } } @@ -270,6 +271,13 @@ func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *ScanDumpCmd) Clone() Cmder { + return &ScanDumpCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // ScanDump is a simple struct, can be copied directly + } +} + // Returns information about a Bloom filter. 
// For more information - https://redis.io/commands/bf.info/ func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd { @@ -296,8 +304,9 @@ type BFInfoCmd struct { func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd { return &BFInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeBFInfo, }, } } @@ -319,43 +328,82 @@ func (cmd *BFInfoCmd) Result() (BFInfo, error) { } func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadMapLen() + result := BFInfo{} + + // Create a mapping from key names to pointers of struct fields + respMapping := map[string]*int64{ + "Capacity": &result.Capacity, + "CAPACITY": &result.Capacity, + "Size": &result.Size, + "SIZE": &result.Size, + "Number of filters": &result.Filters, + "FILTERS": &result.Filters, + "Number of items inserted": &result.ItemsInserted, + "ITEMS": &result.ItemsInserted, + "Expansion rate": &result.ExpansionRate, + "EXPANSION": &result.ExpansionRate, + } + + // Helper function to read and assign a value based on the key + readAndAssignValue := func(key string) error { + fieldPtr, exists := respMapping[key] + if !exists { + return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + } + + // Read the integer and assign to the field via pointer dereferencing + val, err := rd.ReadInt() + if err != nil { + return err + } + *fieldPtr = val + return nil + } + + readType, err := rd.PeekReplyType() if err != nil { return err } - var key string - var result BFInfo - for f := 0; f < n; f++ { - key, err = rd.ReadString() + if len(cmd.args) > 2 && readType == proto.RespArray { + n, err := rd.ReadArrayLen() if err != nil { return err } - - switch key { - case "Capacity": - result.Capacity, err = rd.ReadInt() - case "Size": - result.Size, err = rd.ReadInt() - case "Number of filters": - result.Filters, err = rd.ReadInt() - case "Number of items inserted": - result.ItemsInserted, err = rd.ReadInt() - case "Expansion rate": - 
result.ExpansionRate, err = rd.ReadInt() - default: - return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + if key, ok := cmd.args[2].(string); ok && n == 1 { + if err := readAndAssignValue(key); err != nil { + return err + } + } else { + return fmt.Errorf("redis: BLOOM.INFO invalid argument key type") } - + } else { + n, err := rd.ReadMapLen() if err != nil { return err } + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + if err := readAndAssignValue(key); err != nil { + return err + } + } } cmd.val = result return nil } +func (cmd *BFInfoCmd) Clone() Cmder { + return &BFInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // BFInfo is a simple struct, can be copied directly + } +} + // BFInfoCapacity returns information about the capacity of a Bloom filter. // For more information - https://redis.io/commands/bf.info/ func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd { @@ -593,8 +641,9 @@ type CFInfoCmd struct { func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd { return &CFInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeCFInfo, }, } } @@ -660,6 +709,13 @@ func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *CFInfoCmd) Clone() Cmder { + return &CFInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // CFInfo is a simple struct, can be copied directly + } +} + // CFInfo returns information about a Cuckoo filter. 
// For more information - https://redis.io/commands/cf.info/ func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd { @@ -755,8 +811,9 @@ type CMSInfoCmd struct { func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd { return &CMSInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeCMSInfo, }, } } @@ -811,6 +868,13 @@ func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *CMSInfoCmd) Clone() Cmder { + return &CMSInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // CMSInfo is a simple struct, can be copied directly + } +} + // CMSInfo returns information about a Count-Min Sketch filter. // For more information - https://redis.io/commands/cms.info/ func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd { @@ -948,8 +1012,9 @@ type TopKInfoCmd struct { func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd { return &TopKInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeTopKInfo, }, } } @@ -1006,6 +1071,13 @@ func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *TopKInfoCmd) Clone() Cmder { + return &TopKInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // TopKInfo is a simple struct, can be copied directly + } +} + // TopKInfo returns information about a Top-K filter. // For more information - https://redis.io/commands/topk.info/ func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd { @@ -1084,18 +1156,14 @@ func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIn // Returns OK on success or an error if the operation could not be completed. 
// For more information - https://redis.io/commands/tdigest.add/ func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd { - args := make([]interface{}, 2, 2+len(elements)) + args := make([]interface{}, 2+len(elements)) args[0] = "TDIGEST.ADD" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(elements)) for i, v := range elements { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1106,18 +1174,14 @@ func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64 // Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.byrank/ func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(rank)) + args := make([]interface{}, 2+len(rank)) args[0] = "TDIGEST.BYRANK" args[1] = key - // Convert uint slice to []interface{} - interfaceSlice := make([]interface{}, len(rank)) - for i, v := range rank { - interfaceSlice[i] = v + for i, r := range rank { + args[2+i] = r } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1128,18 +1192,14 @@ func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) // Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed. 
// For more information - https://redis.io/commands/tdigest.byrevrank/ func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(rank)) + args := make([]interface{}, 2+len(rank)) args[0] = "TDIGEST.BYREVRANK" args[1] = key - // Convert uint slice to []interface{} - interfaceSlice := make([]interface{}, len(rank)) - for i, v := range rank { - interfaceSlice[i] = v + for i, r := range rank { + args[2+i] = r } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1150,18 +1210,14 @@ func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint6 // Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.cdf/ func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(elements)) + args := make([]interface{}, 2+len(elements)) args[0] = "TDIGEST.CDF" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(elements)) for i, v := range elements { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) 
_ = c(ctx, cmd) return cmd @@ -1211,8 +1267,9 @@ type TDigestInfoCmd struct { func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd { return &TDigestInfoCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeTDigestInfo, }, } } @@ -1279,6 +1336,13 @@ func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *TDigestInfoCmd) Clone() Cmder { + return &TDigestInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // TDigestInfo is a simple struct, can be copied directly + } +} + // TDigestInfo returns information about a t-Digest data structure. // For more information - https://redis.io/commands/tdigest.info/ func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd { @@ -1344,18 +1408,14 @@ func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd { // Returns an array of floats representing the quantile values for each element or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.quantile/ func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd { - args := make([]interface{}, 2, 2+len(elements)) + args := make([]interface{}, 2+len(elements)) args[0] = "TDIGEST.QUANTILE" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(elements)) for i, v := range elements { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewFloatSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1366,18 +1426,14 @@ func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...fl // Returns an array of integers representing the rank values for each element or an error if the operation could not be completed. 
// For more information - https://redis.io/commands/tdigest.rank/ func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd { - args := make([]interface{}, 2, 2+len(values)) + args := make([]interface{}, 2+len(values)) args[0] = "TDIGEST.RANK" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(values)) for i, v := range values { - interfaceSlice[i] = v + args[i+2] = v } - args = append(args, interfaceSlice...) - cmd := NewIntSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -1399,18 +1455,14 @@ func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd { // Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed. // For more information - https://redis.io/commands/tdigest.revrank/ func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd { - args := make([]interface{}, 2, 2+len(values)) + args := make([]interface{}, 2+len(values)) args[0] = "TDIGEST.REVRANK" args[1] = key - // Convert floatSlice to []interface{} - interfaceSlice := make([]interface{}, len(values)) for i, v := range values { - interfaceSlice[i] = v + args[2+i] = v } - args = append(args, interfaceSlice...) - cmd := NewIntSliceCmd(ctx, args...) 
_ = c(ctx, cmd) return cmd diff --git a/vendor/github.com/redis/go-redis/v9/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go index 5df537c4..49eec935 100644 --- a/vendor/github.com/redis/go-redis/v9/pubsub.go +++ b/vendor/github.com/redis/go-redis/v9/pubsub.go @@ -8,8 +8,10 @@ import ( "time" "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/otel" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/push" ) // PubSub implements Pub/Sub commands as described in @@ -21,7 +23,7 @@ import ( type PubSub struct { opt *Options - newConn func(ctx context.Context, channels []string) (*pool.Conn, error) + newConn func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) closeConn func(*pool.Conn) error mu sync.Mutex @@ -38,6 +40,12 @@ type PubSub struct { chOnce sync.Once msgCh *channel allCh *channel + + // Push notification processor for handling generic push notifications + pushProcessor push.NotificationProcessor + + // Cleanup callback for maintenanceNotifications upgrade tracking + onClose func() } func (c *PubSub) init() { @@ -45,6 +53,9 @@ func (c *PubSub) init() { } func (c *PubSub) String() string { + c.mu.Lock() + defer c.mu.Unlock() + channels := mapKeys(c.channels) channels = append(channels, mapKeys(c.patterns)...) channels = append(channels, mapKeys(c.schannels)...) @@ -66,10 +77,18 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er return c.cn, nil } + if c.opt.Addr == "" { + // TODO(maintenanceNotifications): + // this is probably cluster client + // c.newConn will ignore the addr argument + // will be changed when we have maintenanceNotifications upgrades for cluster clients + c.opt.Addr = internal.RedisNull + } + channels := mapKeys(c.channels) channels = append(channels, newChannels...) 
- cn, err := c.newConn(ctx, channels) + cn, err := c.newConn(ctx, c.opt.Addr, channels) if err != nil { return nil, err } @@ -84,7 +103,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er } func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error { - return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmd(wr, cmd) }) } @@ -150,12 +169,31 @@ func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allo if c.cn != cn { return } + + if !cn.IsUsable() || cn.ShouldHandoff() { + c.reconnect(ctx, fmt.Errorf("pubsub: connection is not usable")) + } + if isBadConn(err, allowTimeout, c.opt.Addr) { c.reconnect(ctx, err) } } func (c *PubSub) reconnect(ctx context.Context, reason error) { + if c.cn != nil && c.cn.ShouldHandoff() { + newEndpoint := c.cn.GetHandoffEndpoint() + // If new endpoint is NULL, use the original address + if newEndpoint == internal.RedisNull { + newEndpoint = c.opt.Addr + } + + if newEndpoint != "" { + // Update the address in the options + oldAddr := c.cn.RemoteAddr().String() + c.opt.Addr = newEndpoint + internal.Logger.Printf(ctx, "pubsub: reconnecting to new endpoint %s (was %s)", newEndpoint, oldAddr) + } + } _ = c.closeTheCn(reason) _, _ = c.conn(ctx, nil) } @@ -164,9 +202,6 @@ func (c *PubSub) closeTheCn(reason error) error { if c.cn == nil { return nil } - if !c.closed { - internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason) - } err := c.closeConn(c.cn) c.cn = nil return err @@ -182,6 +217,11 @@ func (c *PubSub) Close() error { c.closed = true close(c.exit) + // Call cleanup callback if set + if c.onClose != nil { + c.onClose() + } + return c.closeTheCn(pool.ErrClosed) } @@ -364,7 +404,7 @@ func (p *Pong) String() string { return "Pong" } -func (c *PubSub) newMessage(reply interface{}) (interface{}, 
error) { +func (c *PubSub) newMessage(ctx context.Context, cn *pool.Conn, reply interface{}) (interface{}, error) { switch reply := reply.(type) { case string: return &Pong{ @@ -381,30 +421,42 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) { Count: int(reply[2].(int64)), }, nil case "message", "smessage": + channel := reply[1].(string) + sharded := kind == "smessage" switch payload := reply[2].(type) { case string: - return &Message{ - Channel: reply[1].(string), + msg := &Message{ + Channel: channel, Payload: payload, - }, nil + } + // Record PubSub message received + otel.RecordPubSubMessage(ctx, cn, "received", channel, sharded) + return msg, nil case []interface{}: ss := make([]string, len(payload)) for i, s := range payload { ss[i] = s.(string) } - return &Message{ - Channel: reply[1].(string), + msg := &Message{ + Channel: channel, PayloadSlice: ss, - }, nil + } + // Record PubSub message received + otel.RecordPubSubMessage(ctx, cn, "received", channel, sharded) + return msg, nil default: return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload) } case "pmessage": - return &Message{ + channel := reply[2].(string) + msg := &Message{ Pattern: reply[1].(string), - Channel: reply[2].(string), + Channel: channel, Payload: reply[3].(string), - }, nil + } + // Record PubSub message received (pattern message, not sharded) + otel.RecordPubSubMessage(ctx, cn, "received", channel, false) + return msg, nil case "pong": return &Pong{ Payload: reply[1].(string), @@ -426,28 +478,38 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int } // Don't hold the lock to allow subscriptions and pings. 
- cn, err := c.connWithLock(ctx) if err != nil { return nil, err } - err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error { + err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + // Log the error but don't fail the command execution + // Push notification processing errors shouldn't break normal Redis operations + internal.Logger.Printf(ctx, "push: conn[%d] error processing pending notifications before reading reply: %v", cn.GetID(), err) + } return c.cmd.readReply(rd) }) - c.releaseConnWithLock(ctx, cn, err, timeout > 0) if err != nil { return nil, err } - return c.newMessage(c.cmd.Val()) + return c.newMessage(ctx, cn, c.cmd.Val()) } // Receive returns a message as a Subscription, Message, Pong or error. // See PubSub example for details. This is low-level API and in most cases // Channel should be used instead. +// Receive returns a message as a Subscription, Message, Pong, or an error. +// See PubSub example for details. This is a low-level API and in most cases +// Channel should be used instead. +// This method blocks until a message is received or an error occurs. +// It may return early with an error if the context is canceled, the connection fails, +// or other internal errors occur. func (c *PubSub) Receive(ctx context.Context) (interface{}, error) { return c.ReceiveTimeout(ctx, 0) } @@ -491,7 +553,7 @@ func (c *PubSub) getContext() context.Context { // Receive* APIs can not be used after channel is created. // // go-redis periodically sends ping messages to test connection health -// and re-subscribes if ping can not not received for 1 minute. +// and re-subscribes if ping can not received for 1 minute. func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message { c.chOnce.Do(func() { c.msgCh = newChannel(c, opts...) 
@@ -529,6 +591,27 @@ func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interfac return c.allCh.allCh } +func (c *PubSub) processPendingPushNotificationWithReader(ctx context.Context, cn *pool.Conn, rd *proto.Reader) error { + // Only process push notifications for RESP3 connections with a processor + if c.opt.Protocol != 3 || c.pushProcessor == nil { + return nil + } + + // Create handler context with client, connection pool, and connection information + handlerCtx := c.pushNotificationHandlerContext(cn) + return c.pushProcessor.ProcessPendingNotifications(ctx, handlerCtx, rd) +} + +func (c *PubSub) pushNotificationHandlerContext(cn *pool.Conn) push.NotificationHandlerContext { + // PubSub doesn't have a client or connection pool, so we pass nil for those + // PubSub connections are blocking + return push.NotificationHandlerContext{ + PubSub: c, + Conn: cn, + IsBlocking: true, + } +} + type ChannelOption func(c *channel) // WithChannelSize specifies the Go chan size that is used to buffer incoming messages. 
@@ -664,7 +747,7 @@ func (c *channel) initMsgChan() { } case <-timer.C: internal.Logger.Printf( - ctx, "redis: %s channel is full for %s (message is dropped)", + ctx, "redis: %v channel is full for %s (message is dropped)", c, c.chanSendTimeout) } default: @@ -718,7 +801,7 @@ func (c *channel) initAllChan() { } case <-timer.C: internal.Logger.Printf( - ctx, "redis: %s channel is full for %s (message is dropped)", + ctx, "redis: %v channel is full for %s (message is dropped)", c, c.chanSendTimeout) } default: diff --git a/vendor/github.com/redis/go-redis/v9/pubsub_commands.go b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go index 28622aa6..ccc0ed52 100644 --- a/vendor/github.com/redis/go-redis/v9/pubsub_commands.go +++ b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go @@ -1,6 +1,10 @@ package redis -import "context" +import ( + "context" + + "github.com/redis/go-redis/v9/internal/otel" +) type PubSubCmdable interface { Publish(ctx context.Context, channel string, message interface{}) *IntCmd @@ -16,12 +20,20 @@ type PubSubCmdable interface { func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd { cmd := NewIntCmd(ctx, "publish", channel, message) _ = c(ctx, cmd) + // Record PubSub message sent (if command succeeded) + if cmd.Err() == nil { + otel.RecordPubSubMessage(ctx, nil, "sent", channel, false) + } return cmd } func (c cmdable) SPublish(ctx context.Context, channel string, message interface{}) *IntCmd { cmd := NewIntCmd(ctx, "spublish", channel, message) _ = c(ctx, cmd) + // Record PubSub message sent (if command succeeded) + if cmd.Err() == nil { + otel.RecordPubSubMessage(ctx, nil, "sent", channel, true) + } return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/push/errors.go b/vendor/github.com/redis/go-redis/v9/push/errors.go new file mode 100644 index 00000000..c10c98aa --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/errors.go @@ -0,0 +1,176 @@ +package push + +import ( + "errors" + 
"fmt" +) + +// Push notification error definitions +// This file contains all error types and messages used by the push notification system + +// Error reason constants +const ( + // HandlerReasons + ReasonHandlerNil = "handler cannot be nil" + ReasonHandlerExists = "cannot overwrite existing handler" + ReasonHandlerProtected = "handler is protected" + + // ProcessorReasons + ReasonPushNotificationsDisabled = "push notifications are disabled" +) + +// ProcessorType represents the type of processor involved in the error +// defined as a custom type for better readability and easier maintenance +type ProcessorType string + +const ( + // ProcessorTypes + ProcessorTypeProcessor = ProcessorType("processor") + ProcessorTypeVoidProcessor = ProcessorType("void_processor") + ProcessorTypeCustom = ProcessorType("custom") +) + +// ProcessorOperation represents the operation being performed by the processor +// defined as a custom type for better readability and easier maintenance +type ProcessorOperation string + +const ( + // ProcessorOperations + ProcessorOperationProcess = ProcessorOperation("process") + ProcessorOperationRegister = ProcessorOperation("register") + ProcessorOperationUnregister = ProcessorOperation("unregister") + ProcessorOperationUnknown = ProcessorOperation("unknown") +) + +// Common error variables for reuse +var ( + // ErrHandlerNil is returned when attempting to register a nil handler + ErrHandlerNil = errors.New(ReasonHandlerNil) +) + +// Registry errors + +// ErrHandlerExists creates an error for when attempting to overwrite an existing handler +func ErrHandlerExists(pushNotificationName string) error { + return NewHandlerError(ProcessorOperationRegister, pushNotificationName, ReasonHandlerExists, nil) +} + +// ErrProtectedHandler creates an error for when attempting to unregister a protected handler +func ErrProtectedHandler(pushNotificationName string) error { + return NewHandlerError(ProcessorOperationUnregister, pushNotificationName, 
ReasonHandlerProtected, nil) +} + +// VoidProcessor errors + +// ErrVoidProcessorRegister creates an error for when attempting to register a handler on void processor +func ErrVoidProcessorRegister(pushNotificationName string) error { + return NewProcessorError(ProcessorTypeVoidProcessor, ProcessorOperationRegister, pushNotificationName, ReasonPushNotificationsDisabled, nil) +} + +// ErrVoidProcessorUnregister creates an error for when attempting to unregister a handler on void processor +func ErrVoidProcessorUnregister(pushNotificationName string) error { + return NewProcessorError(ProcessorTypeVoidProcessor, ProcessorOperationUnregister, pushNotificationName, ReasonPushNotificationsDisabled, nil) +} + +// Error type definitions for advanced error handling + +// HandlerError represents errors related to handler operations +type HandlerError struct { + Operation ProcessorOperation + PushNotificationName string + Reason string + Err error +} + +func (e *HandlerError) Error() string { + if e.Err != nil { + return fmt.Sprintf("handler %s failed for '%s': %s (%v)", e.Operation, e.PushNotificationName, e.Reason, e.Err) + } + return fmt.Sprintf("handler %s failed for '%s': %s", e.Operation, e.PushNotificationName, e.Reason) +} + +func (e *HandlerError) Unwrap() error { + return e.Err +} + +// NewHandlerError creates a new HandlerError +func NewHandlerError(operation ProcessorOperation, pushNotificationName, reason string, err error) *HandlerError { + return &HandlerError{ + Operation: operation, + PushNotificationName: pushNotificationName, + Reason: reason, + Err: err, + } +} + +// ProcessorError represents errors related to processor operations +type ProcessorError struct { + ProcessorType ProcessorType // "processor", "void_processor" + Operation ProcessorOperation // "process", "register", "unregister" + PushNotificationName string // Name of the push notification involved + Reason string + Err error +} + +func (e *ProcessorError) Error() string { + notifInfo := "" + 
if e.PushNotificationName != "" { + notifInfo = fmt.Sprintf(" for '%s'", e.PushNotificationName) + } + if e.Err != nil { + return fmt.Sprintf("%s %s failed%s: %s (%v)", e.ProcessorType, e.Operation, notifInfo, e.Reason, e.Err) + } + return fmt.Sprintf("%s %s failed%s: %s", e.ProcessorType, e.Operation, notifInfo, e.Reason) +} + +func (e *ProcessorError) Unwrap() error { + return e.Err +} + +// NewProcessorError creates a new ProcessorError +func NewProcessorError(processorType ProcessorType, operation ProcessorOperation, pushNotificationName, reason string, err error) *ProcessorError { + return &ProcessorError{ + ProcessorType: processorType, + Operation: operation, + PushNotificationName: pushNotificationName, + Reason: reason, + Err: err, + } +} + +// Helper functions for common error scenarios + +// IsHandlerNilError checks if an error is due to a nil handler +func IsHandlerNilError(err error) bool { + return errors.Is(err, ErrHandlerNil) +} + +// IsHandlerExistsError checks if an error is due to attempting to overwrite an existing handler. +// This function works correctly even when the error is wrapped. +func IsHandlerExistsError(err error) bool { + var handlerErr *HandlerError + if errors.As(err, &handlerErr) { + return handlerErr.Operation == ProcessorOperationRegister && handlerErr.Reason == ReasonHandlerExists + } + return false +} + +// IsProtectedHandlerError checks if an error is due to attempting to unregister a protected handler. +// This function works correctly even when the error is wrapped. +func IsProtectedHandlerError(err error) bool { + var handlerErr *HandlerError + if errors.As(err, &handlerErr) { + return handlerErr.Operation == ProcessorOperationUnregister && handlerErr.Reason == ReasonHandlerProtected + } + return false +} + +// IsVoidProcessorError checks if an error is due to void processor operations. +// This function works correctly even when the error is wrapped. 
+func IsVoidProcessorError(err error) bool { + var procErr *ProcessorError + if errors.As(err, &procErr) { + return procErr.ProcessorType == ProcessorTypeVoidProcessor && procErr.Reason == ReasonPushNotificationsDisabled + } + return false +} diff --git a/vendor/github.com/redis/go-redis/v9/push/handler.go b/vendor/github.com/redis/go-redis/v9/push/handler.go new file mode 100644 index 00000000..815edce3 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/handler.go @@ -0,0 +1,14 @@ +package push + +import ( + "context" +) + +// NotificationHandler defines the interface for push notification handlers. +type NotificationHandler interface { + // HandlePushNotification processes a push notification with context information. + // The handlerCtx provides information about the client, connection pool, and connection + // on which the notification was received, allowing handlers to make informed decisions. + // Returns an error if the notification could not be handled. + HandlePushNotification(ctx context.Context, handlerCtx NotificationHandlerContext, notification []interface{}) error +} diff --git a/vendor/github.com/redis/go-redis/v9/push/handler_context.go b/vendor/github.com/redis/go-redis/v9/push/handler_context.go new file mode 100644 index 00000000..c39e186b --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/handler_context.go @@ -0,0 +1,44 @@ +package push + +// No imports needed for this file + +// NotificationHandlerContext provides context information about where a push notification was received. +// This struct allows handlers to make informed decisions based on the source of the notification +// with strongly typed access to different client types using concrete types. +type NotificationHandlerContext struct { + // Client is the Redis client instance that received the notification. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. 
+ // It can be one of the following types: + // - *redis.baseClient + // - *redis.Client + // - *redis.ClusterClient + // - *redis.Conn + Client interface{} + + // ConnPool is the connection pool from which the connection was obtained. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. + // It can be one of the following types: + // - *pool.ConnPool + // - *pool.SingleConnPool + // - *pool.StickyConnPool + ConnPool interface{} + + // PubSub is the PubSub instance that received the notification. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. + // It can be one of the following types: + // - *redis.PubSub + PubSub interface{} + + // Conn is the specific connection on which the notification was received. + // It is interface to both allow for future expansion and to avoid + // circular dependencies. The developer is responsible for type assertion. + // It can be one of the following types: + // - *pool.Conn + Conn interface{} + + // IsBlocking indicates if the notification was received on a blocking connection. + IsBlocking bool +} diff --git a/vendor/github.com/redis/go-redis/v9/push/processor.go b/vendor/github.com/redis/go-redis/v9/push/processor.go new file mode 100644 index 00000000..b8112ddc --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/processor.go @@ -0,0 +1,203 @@ +package push + +import ( + "context" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" +) + +// NotificationProcessor defines the interface for push notification processors. +type NotificationProcessor interface { + // GetHandler returns the handler for a specific push notification name. + GetHandler(pushNotificationName string) NotificationHandler + // ProcessPendingNotifications checks for and processes any pending push notifications. 
+ // To be used when it is known that there are notifications on the socket. + // It will try to read from the socket and if it is empty - it may block. + ProcessPendingNotifications(ctx context.Context, handlerCtx NotificationHandlerContext, rd *proto.Reader) error + // RegisterHandler registers a handler for a specific push notification name. + RegisterHandler(pushNotificationName string, handler NotificationHandler, protected bool) error + // UnregisterHandler removes a handler for a specific push notification name. + UnregisterHandler(pushNotificationName string) error +} + +// Processor handles push notifications with a registry of handlers +type Processor struct { + registry *Registry +} + +// NewProcessor creates a new push notification processor +func NewProcessor() *Processor { + return &Processor{ + registry: NewRegistry(), + } +} + +// GetHandler returns the handler for a specific push notification name +func (p *Processor) GetHandler(pushNotificationName string) NotificationHandler { + return p.registry.GetHandler(pushNotificationName) +} + +// RegisterHandler registers a handler for a specific push notification name +func (p *Processor) RegisterHandler(pushNotificationName string, handler NotificationHandler, protected bool) error { + return p.registry.RegisterHandler(pushNotificationName, handler, protected) +} + +// UnregisterHandler removes a handler for a specific push notification name +func (p *Processor) UnregisterHandler(pushNotificationName string) error { + return p.registry.UnregisterHandler(pushNotificationName) +} + +// ProcessPendingNotifications checks for and processes any pending push notifications +// This method should be called by the client in WithReader before reading the reply +// It will try to read from the socket and if it is empty - it may block. 
+func (p *Processor) ProcessPendingNotifications(ctx context.Context, handlerCtx NotificationHandlerContext, rd *proto.Reader) error { + if rd == nil { + return nil + } + + for { + // Check if there's data available to read + replyType, err := rd.PeekReplyType() + if err != nil { + // No more data available or error reading + // if timeout, it will be handled by the caller + break + } + + // Only process push notifications (arrays starting with >) + if replyType != proto.RespPush { + break + } + + // see if we should skip this notification + notificationName, err := rd.PeekPushNotificationName() + if err != nil { + break + } + + if willHandleNotificationInClient(notificationName) { + break + } + + // Read the push notification + reply, err := rd.ReadReply() + if err != nil { + internal.Logger.Printf(ctx, "push: error reading push notification: %v", err) + break + } + + // Convert to slice of interfaces + notification, ok := reply.([]interface{}) + if !ok { + break + } + + // Handle the notification directly + if len(notification) > 0 { + // Extract the notification type (first element) + if notificationType, ok := notification[0].(string); ok { + // Get the handler for this notification type + if handler := p.registry.GetHandler(notificationType); handler != nil { + // Handle the notification + err := handler.HandlePushNotification(ctx, handlerCtx, notification) + if err != nil { + internal.Logger.Printf(ctx, "push: error handling push notification: %v", err) + } + } + } + } + } + + return nil +} + +// VoidProcessor discards all push notifications without processing them +type VoidProcessor struct{} + +// NewVoidProcessor creates a new void push notification processor +func NewVoidProcessor() *VoidProcessor { + return &VoidProcessor{} +} + +// GetHandler returns nil for void processor since it doesn't maintain handlers +func (v *VoidProcessor) GetHandler(_ string) NotificationHandler { + return nil +} + +// RegisterHandler returns an error for void processor since 
it doesn't maintain handlers +func (v *VoidProcessor) RegisterHandler(pushNotificationName string, _ NotificationHandler, _ bool) error { + return ErrVoidProcessorRegister(pushNotificationName) +} + +// UnregisterHandler returns an error for void processor since it doesn't maintain handlers +func (v *VoidProcessor) UnregisterHandler(pushNotificationName string) error { + return ErrVoidProcessorUnregister(pushNotificationName) +} + +// ProcessPendingNotifications for VoidProcessor does nothing since push notifications +// are only available in RESP3 and this processor is used for RESP2 connections. +// This avoids unnecessary buffer scanning overhead. +// It does however read and discard all push notifications from the buffer to avoid +// them being interpreted as a reply. +// This method should be called by the client in WithReader before reading the reply +// to be sure there are no buffered push notifications. +// It will try to read from the socket and if it is empty - it may block. +func (v *VoidProcessor) ProcessPendingNotifications(_ context.Context, handlerCtx NotificationHandlerContext, rd *proto.Reader) error { + // read and discard all push notifications + if rd == nil { + return nil + } + + for { + // Check if there's data available to read + replyType, err := rd.PeekReplyType() + if err != nil { + // No more data available or error reading + // if timeout, it will be handled by the caller + break + } + + // Only process push notifications (arrays starting with >) + if replyType != proto.RespPush { + break + } + // see if we should skip this notification + notificationName, err := rd.PeekPushNotificationName() + if err != nil { + break + } + + if willHandleNotificationInClient(notificationName) { + break + } + + // Read the push notification + _, err = rd.ReadReply() + if err != nil { + internal.Logger.Printf(context.Background(), "push: error reading push notification: %v", err) + return nil + } + } + return nil +} + +// willHandleNotificationInClient 
checks if a notification type should be ignored by the push notification +// processor and handled by other specialized systems instead (pub/sub, streams, keyspace, etc.). +func willHandleNotificationInClient(notificationType string) bool { + switch notificationType { + // Pub/Sub notifications - handled by pub/sub system + case "message", // Regular pub/sub message + "pmessage", // Pattern pub/sub message + "subscribe", // Subscription confirmation + "unsubscribe", // Unsubscription confirmation + "psubscribe", // Pattern subscription confirmation + "punsubscribe", // Pattern unsubscription confirmation + "smessage", // Sharded pub/sub message (Redis 7.0+) + "ssubscribe", // Sharded subscription confirmation + "sunsubscribe": // Sharded unsubscription confirmation + return true + default: + return false + } +} diff --git a/vendor/github.com/redis/go-redis/v9/push/push.go b/vendor/github.com/redis/go-redis/v9/push/push.go new file mode 100644 index 00000000..e6adeaa4 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/push.go @@ -0,0 +1,7 @@ +// Package push provides push notifications for Redis. +// This is an EXPERIMENTAL API for handling push notifications from Redis. +// It is not yet stable and may change in the future. +// Although this is in a public package, in its current form public use is not advised. +// Pending push notifications should be processed before executing any readReply from the connection +// as per RESP3 specification push notifications can be sent at any time. 
+package push diff --git a/vendor/github.com/redis/go-redis/v9/push/registry.go b/vendor/github.com/redis/go-redis/v9/push/registry.go new file mode 100644 index 00000000..a265ae92 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push/registry.go @@ -0,0 +1,61 @@ +package push + +import ( + "sync" +) + +// Registry manages push notification handlers +type Registry struct { + mu sync.RWMutex + handlers map[string]NotificationHandler + protected map[string]bool +} + +// NewRegistry creates a new push notification registry +func NewRegistry() *Registry { + return &Registry{ + handlers: make(map[string]NotificationHandler), + protected: make(map[string]bool), + } +} + +// RegisterHandler registers a handler for a specific push notification name +func (r *Registry) RegisterHandler(pushNotificationName string, handler NotificationHandler, protected bool) error { + if handler == nil { + return ErrHandlerNil + } + + r.mu.Lock() + defer r.mu.Unlock() + + // Check if handler already exists + if _, exists := r.protected[pushNotificationName]; exists { + return ErrHandlerExists(pushNotificationName) + } + + r.handlers[pushNotificationName] = handler + r.protected[pushNotificationName] = protected + return nil +} + +// GetHandler returns the handler for a specific push notification name +func (r *Registry) GetHandler(pushNotificationName string) NotificationHandler { + r.mu.RLock() + defer r.mu.RUnlock() + return r.handlers[pushNotificationName] +} + +// UnregisterHandler removes a handler for a specific push notification name +func (r *Registry) UnregisterHandler(pushNotificationName string) error { + r.mu.Lock() + defer r.mu.Unlock() + + // Check if handler is protected + if protected, exists := r.protected[pushNotificationName]; exists && protected { + return ErrProtectedHandler(pushNotificationName) + } + + delete(r.handlers, pushNotificationName) + delete(r.protected, pushNotificationName) + return nil +} diff --git 
a/vendor/github.com/redis/go-redis/v9/push_notifications.go b/vendor/github.com/redis/go-redis/v9/push_notifications.go new file mode 100644 index 00000000..572955fe --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/push_notifications.go @@ -0,0 +1,21 @@ +package redis + +import ( + "github.com/redis/go-redis/v9/push" +) + +// NewPushNotificationProcessor creates a new push notification processor +// This processor maintains a registry of handlers and processes push notifications +// It is used for RESP3 connections where push notifications are available +func NewPushNotificationProcessor() push.NotificationProcessor { + return push.NewProcessor() +} + +// NewVoidPushNotificationProcessor creates a new void push notification processor +// This processor does not maintain any handlers and always returns nil for all operations +// It is used for RESP2 connections where push notifications are not available +// It can also be used to disable push notifications for RESP3 connections, where +// it will discard all push notifications without processing them +func NewVoidPushNotificationProcessor() push.NotificationProcessor { + return push.NewVoidProcessor() +} diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go index 4dd862b8..85622e43 100644 --- a/vendor/github.com/redis/go-redis/v9/redis.go +++ b/vendor/github.com/redis/go-redis/v9/redis.go @@ -9,10 +9,15 @@ import ( "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/auth/streaming" "github.com/redis/go-redis/v9/internal/hscan" + "github.com/redis/go-redis/v9/internal/otel" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) // Scanner internal/hscan.Scanner exposed interface. 
@@ -22,10 +27,20 @@ type Scanner = hscan.Scanner const Nil = proto.Nil // SetLogger set custom log +// Use with VoidLogger to disable logging. +// If logger is nil, the call is ignored and the existing logger is kept. func SetLogger(logger internal.Logging) { + if logger == nil { + return + } internal.Logger = logger } +// SetLogLevel sets the log level for the library. +func SetLogLevel(logLevel internal.LogLevelT) { + internal.LogLevel = logLevel +} + //------------------------------------------------------------------------------ type Hook interface { @@ -41,7 +56,7 @@ type ( ) type hooksMixin struct { - hooksMu *sync.Mutex + hooksMu *sync.RWMutex slice []Hook initial hooks @@ -49,7 +64,7 @@ type hooksMixin struct { } func (hs *hooksMixin) initHooks(hooks hooks) { - hs.hooksMu = new(sync.Mutex) + hs.hooksMu = new(sync.RWMutex) hs.initial = hooks hs.chain() } @@ -151,7 +166,7 @@ func (hs *hooksMixin) clone() hooksMixin { clone := *hs l := len(clone.slice) clone.slice = clone.slice[:l:l] - clone.hooksMu = new(sync.Mutex) + clone.hooksMu = new(sync.RWMutex) return clone } @@ -176,9 +191,14 @@ func (hs *hooksMixin) withProcessPipelineHook( } func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) { - hs.hooksMu.Lock() - defer hs.hooksMu.Unlock() - return hs.current.dial(ctx, network, addr) + // Access to hs.current is guarded by a read-only lock since it may be mutated by AddHook(...) + // while this dialer is concurrently accessed by the background connection pool population + // routine when MinIdleConns > 0. 
+ hs.hooksMu.RLock() + current := hs.current + hs.hooksMu.RUnlock() + + return current.dial(ctx, network, addr) } func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error { @@ -196,15 +216,40 @@ func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) e //------------------------------------------------------------------------------ type baseClient struct { - opt *Options - connPool pool.Pooler + opt *Options + optLock sync.RWMutex + connPool pool.Pooler + pubSubPool *pool.PubSubPool + hooksMixin onClose func() error // hook called when client is closed + + // Push notification processing + pushProcessor push.NotificationProcessor + + // Maintenance notifications manager + maintNotificationsManager *maintnotifications.Manager + maintNotificationsManagerLock sync.RWMutex + + // streamingCredentialsManager is used to manage streaming credentials + streamingCredentialsManager *streaming.Manager } func (c *baseClient) clone() *baseClient { - clone := *c - return &clone + c.maintNotificationsManagerLock.RLock() + maintNotificationsManager := c.maintNotificationsManager + c.maintNotificationsManagerLock.RUnlock() + + clone := &baseClient{ + opt: c.opt, + connPool: c.connPool, + pubSubPool: c.pubSubPool, + onClose: c.onClose, + pushProcessor: c.pushProcessor, + maintNotificationsManager: maintNotificationsManager, + streamingCredentialsManager: c.streamingCredentialsManager, + } + return clone } func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { @@ -222,21 +267,6 @@ func (c *baseClient) String() string { return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) } -func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { - cn, err := c.connPool.NewConn(ctx) - if err != nil { - return nil, err - } - - err = c.initConn(ctx, cn) - if err != nil { - _ = c.connPool.CloseConn(cn) - return nil, err - } - - return cn, nil -} - func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { if 
c.opt.Limiter != nil { err := c.opt.Limiter.Allow() @@ -262,7 +292,7 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { return nil, err } - if cn.Inited { + if cn.IsInited() { return cn, nil } @@ -274,35 +304,206 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { return nil, err } + if dialStartNs := cn.GetDialStartNs(); dialStartNs > 0 { + if cb := pool.GetMetricConnectionCreateTimeCallback(); cb != nil { + duration := time.Duration(time.Now().UnixNano() - dialStartNs) + cb(ctx, duration, cn) + } + } + + // initConn will transition to IDLE state, so we need to acquire it + // before returning it to the user. + if !cn.TryAcquire() { + return nil, fmt.Errorf("redis: connection is not usable") + } + return cn, nil } +func (c *baseClient) reAuthConnection() func(poolCn *pool.Conn, credentials auth.Credentials) error { + return func(poolCn *pool.Conn, credentials auth.Credentials) error { + var err error + username, password := credentials.BasicAuth() + + // Use background context - timeout is handled by ReadTimeout in WithReader/WithWriter + ctx := context.Background() + + connPool := pool.NewSingleConnPool(c.connPool, poolCn) + + // Pass hooks so that reauth commands are recorded/traced + cn := newConn(c.opt, connPool, &c.hooksMixin) + + if username != "" { + err = cn.AuthACL(ctx, username, password).Err() + } else { + err = cn.Auth(ctx, password).Err() + } + + return err + } +} +func (c *baseClient) onAuthenticationErr() func(poolCn *pool.Conn, err error) { + return func(poolCn *pool.Conn, err error) { + if err != nil { + if isBadConn(err, false, c.opt.Addr) { + // Close the connection to force a reconnection. 
+ err := c.connPool.CloseConn(poolCn) + if err != nil { + internal.Logger.Printf(context.Background(), "redis: failed to close connection: %v", err) + // try to close the network connection directly + // so that no resource is leaked + err := poolCn.Close() + if err != nil { + internal.Logger.Printf(context.Background(), "redis: failed to close network connection: %v", err) + } + } + } + internal.Logger.Printf(context.Background(), "redis: re-authentication failed: %v", err) + } + } +} + +func (c *baseClient) wrappedOnClose(newOnClose func() error) func() error { + onClose := c.onClose + return func() error { + var firstErr error + err := newOnClose() + // Even if we have an error we would like to execute the onClose hook + // if it exists. We will return the first error that occurred. + // This is to keep error handling consistent with the rest of the code. + if err != nil { + firstErr = err + } + if onClose != nil { + err = onClose() + if err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr + } +} + func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { - if cn.Inited { + // This function is called in two scenarios: + // 1. First-time init: Connection is in CREATED state (from pool.Get()) + // - We need to transition CREATED → INITIALIZING and do the initialization + // - If another goroutine is already initializing, we WAIT for it to finish + // 2. 
Re-initialization: Connection is in INITIALIZING state (from SetNetConnAndInitConn()) + // - We're already in INITIALIZING, so just proceed with initialization + + currentState := cn.GetStateMachine().GetState() + + // Fast path: Check if already initialized (IDLE or IN_USE) + if currentState == pool.StateIdle || currentState == pool.StateInUse { return nil } - cn.Inited = true - username, password := c.opt.Username, c.opt.Password - if c.opt.CredentialsProvider != nil { - username, password = c.opt.CredentialsProvider() + // If in CREATED state, try to transition to INITIALIZING + if currentState == pool.StateCreated { + finalState, err := cn.GetStateMachine().TryTransition([]pool.ConnState{pool.StateCreated}, pool.StateInitializing) + if err != nil { + // Another goroutine is initializing or connection is in unexpected state + // Check what state we're in now + if finalState == pool.StateIdle || finalState == pool.StateInUse { + // Already initialized by another goroutine + return nil + } + + if finalState == pool.StateInitializing { + // Another goroutine is initializing - WAIT for it to complete + // Use a context with timeout = min(remaining command timeout, DialTimeout) + // This prevents waiting too long while respecting the caller's deadline + var waitCtx context.Context + var cancel context.CancelFunc + dialTimeout := c.opt.DialTimeout + + if cmdDeadline, hasCmdDeadline := ctx.Deadline(); hasCmdDeadline { + // Calculate remaining time until command deadline + remainingTime := time.Until(cmdDeadline) + // Use the minimum of remaining time and DialTimeout + if remainingTime < dialTimeout { + // Command deadline is sooner, use it + waitCtx = ctx + } else { + // DialTimeout is shorter, cap the wait at DialTimeout + waitCtx, cancel = context.WithTimeout(ctx, dialTimeout) + } + } else { + // No command deadline, use DialTimeout to prevent waiting indefinitely + waitCtx, cancel = context.WithTimeout(ctx, dialTimeout) + } + if cancel != nil { + defer cancel() + } 
+ + finalState, err := cn.GetStateMachine().AwaitAndTransition( + waitCtx, + []pool.ConnState{pool.StateIdle, pool.StateInUse}, + pool.StateIdle, // Target is IDLE (but we're already there, so this is a no-op) + ) + if err != nil { + return err + } + // Verify we're now initialized + if finalState == pool.StateIdle || finalState == pool.StateInUse { + return nil + } + // Unexpected state after waiting + return fmt.Errorf("connection in unexpected state after initialization: %s", finalState) + } + + // Unexpected state (CLOSED, UNUSABLE, etc.) + return err + } } + // At this point, we're in INITIALIZING state and we own the initialization + // If we fail, we must transition to CLOSED + var initErr error connPool := pool.NewSingleConnPool(c.connPool, cn) - conn := newConn(c.opt, connPool) + conn := newConn(c.opt, connPool, &c.hooksMixin) - var auth bool - protocol := c.opt.Protocol - // By default, use RESP3 in current version. - if protocol < 2 { - protocol = 3 + username, password := "", "" + if c.opt.StreamingCredentialsProvider != nil { + credListener, initErr := c.streamingCredentialsManager.Listener( + cn, + c.reAuthConnection(), + c.onAuthenticationErr(), + ) + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to create credentials listener: %w", initErr) + } + + credentials, unsubscribeFromCredentialsProvider, initErr := c.opt.StreamingCredentialsProvider. 
+ Subscribe(credListener) + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to subscribe to streaming credentials: %w", initErr) + } + + c.onClose = c.wrappedOnClose(unsubscribeFromCredentialsProvider) + cn.SetOnClose(unsubscribeFromCredentialsProvider) + + username, password = credentials.BasicAuth() + } else if c.opt.CredentialsProviderContext != nil { + username, password, initErr = c.opt.CredentialsProviderContext(ctx) + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to get credentials from context provider: %w", initErr) + } + } else if c.opt.CredentialsProvider != nil { + username, password = c.opt.CredentialsProvider() + } else if c.opt.Username != "" || c.opt.Password != "" { + username, password = c.opt.Username, c.opt.Password } // for redis-server versions that do not support the HELLO command, // RESP2 will continue to be used. - if err := conn.Hello(ctx, protocol, username, password, "").Err(); err == nil { - auth = true - } else if !isRedisError(err) { + if initErr = conn.Hello(ctx, c.opt.Protocol, username, password, c.opt.ClientName).Err(); initErr == nil { + // Authentication successful with HELLO command + } else if !isRedisError(initErr) { // When the server responds with the RESP protocol and the result is not a normal // execution result of the HELLO command, we consider it to be an indication that // the server does not support the HELLO command. @@ -310,28 +511,22 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { // or it could be DragonflyDB or a third-party redis-proxy. They all respond // with different error string results for unsupported commands, making it // difficult to rely on error strings to determine all results. 
- return err - } - if !c.opt.DisableIndentity { - libName := "" - libVer := Version() - if c.opt.IdentitySuffix != "" { - libName = c.opt.IdentitySuffix + cn.GetStateMachine().Transition(pool.StateClosed) + return initErr + } else if password != "" { + // Try legacy AUTH command if HELLO failed + if username != "" { + initErr = conn.AuthACL(ctx, username, password).Err() + } else { + initErr = conn.Auth(ctx, password).Err() } - libInfo := LibraryInfo{LibName: &libName} - conn.ClientSetInfo(ctx, libInfo) - libInfo = LibraryInfo{LibVer: &libVer} - conn.ClientSetInfo(ctx, libInfo) - } - _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error { - if !auth && password != "" { - if username != "" { - pipe.AuthACL(ctx, username, password) - } else { - pipe.Auth(ctx, password) - } + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to authenticate: %w", initErr) } + } + _, initErr = conn.Pipelined(ctx, func(pipe Pipeliner) error { if c.opt.DB > 0 { pipe.Select(ctx, c.opt.DB) } @@ -346,13 +541,107 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { return nil }) - if err != nil { - return err + if initErr != nil { + cn.GetStateMachine().Transition(pool.StateClosed) + return fmt.Errorf("failed to initialize connection options: %w", initErr) } - if c.opt.OnConnect != nil { - return c.opt.OnConnect(ctx, conn) + // Enable maintnotifications if maintnotifications are configured + c.optLock.RLock() + maintNotifEnabled := c.opt.MaintNotificationsConfig != nil && c.opt.MaintNotificationsConfig.Mode != maintnotifications.ModeDisabled + protocol := c.opt.Protocol + var endpointType maintnotifications.EndpointType + if maintNotifEnabled { + endpointType = c.opt.MaintNotificationsConfig.EndpointType } + c.optLock.RUnlock() + var maintNotifHandshakeErr error + if maintNotifEnabled && protocol == 3 { + maintNotifHandshakeErr = conn.ClientMaintNotifications( + ctx, + true, + endpointType.String(), + ).Err() + if 
maintNotifHandshakeErr != nil { + if !isRedisError(maintNotifHandshakeErr) { + // if not redis error, fail the connection + cn.GetStateMachine().Transition(pool.StateClosed) + return maintNotifHandshakeErr + } + c.optLock.Lock() + // handshake failed - check and modify config atomically + switch c.opt.MaintNotificationsConfig.Mode { + case maintnotifications.ModeEnabled: + // enabled mode, fail the connection + c.optLock.Unlock() + cn.GetStateMachine().Transition(pool.StateClosed) + + // Record handshake failure metric + if errorCallback := pool.GetMetricErrorCallback(); errorCallback != nil { + errorCallback(ctx, "HANDSHAKE_FAILED", cn, "HANDSHAKE_FAILED", true, 0) + } + + return fmt.Errorf("failed to enable maintnotifications: %w", maintNotifHandshakeErr) + default: // will handle auto and any other + // Disabling logging here as it's too noisy. + // TODO: Enable when we have a better logging solution for log levels + // internal.Logger.Printf(ctx, "auto mode fallback: maintnotifications disabled due to handshake error: %v", maintNotifHandshakeErr) + c.opt.MaintNotificationsConfig.Mode = maintnotifications.ModeDisabled + c.optLock.Unlock() + // auto mode, disable maintnotifications and continue + if initErr := c.disableMaintNotificationsUpgrades(); initErr != nil { + // Log error but continue - auto mode should be resilient + internal.Logger.Printf(ctx, "failed to disable maintnotifications in auto mode: %v", initErr) + } + } + } else { + // handshake was executed successfully + // to make sure that the handshake will be executed on other connections as well if it was successfully + // executed on this connection, we will force the handshake to be executed on all connections + c.optLock.Lock() + c.opt.MaintNotificationsConfig.Mode = maintnotifications.ModeEnabled + c.optLock.Unlock() + } + } + + if !c.opt.DisableIdentity && !c.opt.DisableIndentity { + libName := "" + libVer := Version() + if c.opt.IdentitySuffix != "" { + libName = c.opt.IdentitySuffix + } + p := 
conn.Pipeline() + p.ClientSetInfo(ctx, WithLibraryName(libName)) + p.ClientSetInfo(ctx, WithLibraryVersion(libVer)) + // Handle network errors (e.g. timeouts) in CLIENT SETINFO to avoid + // out of order responses later on. + if _, initErr = p.Exec(ctx); initErr != nil && !isRedisError(initErr) { + cn.GetStateMachine().Transition(pool.StateClosed) + return initErr + } + } + + // Set the connection initialization function for potential reconnections + // This must be set before transitioning to IDLE so that handoff/reauth can use it + cn.SetInitConnFunc(c.createInitConnFunc()) + + // Initialization succeeded - transition to IDLE state + // This marks the connection as initialized and ready for use + // NOTE: The connection is still owned by the calling goroutine at this point + // and won't be available to other goroutines until it's Put() back into the pool + cn.GetStateMachine().Transition(pool.StateIdle) + + // Call OnConnect hook if configured + // The connection is in IDLE state but still owned by this goroutine + // If OnConnect needs to send commands, it can use the connection safely + if c.opt.OnConnect != nil { + if initErr = c.opt.OnConnect(ctx, conn); initErr != nil { + // OnConnect failed - transition to closed + cn.GetStateMachine().Transition(pool.StateClosed) + return initErr + } + } + return nil } @@ -364,6 +653,10 @@ func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) if isBadConn(err, false, c.opt.Addr) { c.connPool.Remove(ctx, cn, err) } else { + // process any pending push notifications before returning the connection to the pool + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before releasing connection: %v", err) + } c.connPool.Put(ctx, cn) } } @@ -391,37 +684,172 @@ func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, } func (c *baseClient) process(ctx context.Context, cmd Cmder) error { + // Start 
measuring total operation duration (includes all retries) + // Only call time.Now() if operation duration callback is set to avoid overhead + var operationStart time.Time + opDurationCallback := otel.GetOperationDurationCallback() + if opDurationCallback != nil { + operationStart = time.Now() + } + var lastConn *pool.Conn + var lastErr error + totalAttempts := 0 for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + totalAttempts++ attempt := attempt - retry, err := c._process(ctx, cmd, attempt) + retry, cn, err := c._process(ctx, cmd, attempt) + if cn != nil { + lastConn = cn + } if err == nil || !retry { + // Record total operation duration + if opDurationCallback != nil { + operationDuration := time.Since(operationStart) + opDurationCallback(ctx, operationDuration, cmd, totalAttempts, err, lastConn, c.opt.DB) + } + + if err != nil { + if errorCallback := pool.GetMetricErrorCallback(); errorCallback != nil { + errorType, statusCode, isInternal := classifyCommandError(err) + errorCallback(ctx, errorType, lastConn, statusCode, isInternal, totalAttempts-1) + } + } return err } lastErr = err } + + // Record failed operation after all retries + if opDurationCallback != nil { + operationDuration := time.Since(operationStart) + opDurationCallback(ctx, operationDuration, cmd, totalAttempts, lastErr, lastConn, c.opt.DB) + } + + // Record error metric for exhausted retries + if errorCallback := pool.GetMetricErrorCallback(); errorCallback != nil { + errorType, statusCode, isInternal := classifyCommandError(lastErr) + errorCallback(ctx, errorType, lastConn, statusCode, isInternal, totalAttempts-1) + } + return lastErr } -func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return false, err +// classifyCommandError classifies an error for metrics reporting. 
+// Returns: errorType, statusCode, isInternal +// - errorType: A string describing the error type (e.g., "TIMEOUT", "NETWORK", "ERR") +// - statusCode: The Redis error prefix or error category +// - isInternal: true for network/timeout errors, false for Redis server errors +func classifyCommandError(err error) (errorType, statusCode string, isInternal bool) { + if err == nil { + return "", "", false + } + + errStr := err.Error() + + // Check for timeout errors + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + return "TIMEOUT", "TIMEOUT", true + } + + // Check for network errors + if _, ok := err.(net.Error); ok { + return "NETWORK", "NETWORK", true + } + + // Check for context errors + if errors.Is(err, context.Canceled) { + return "CONTEXT_CANCELED", "CONTEXT_CANCELED", true + } + if errors.Is(err, context.DeadlineExceeded) { + return "CONTEXT_TIMEOUT", "CONTEXT_TIMEOUT", true + } + + // Check for Redis errors + // Examples: "ERR ...", "WRONGTYPE ...", "CLUSTERDOWN ..." + if len(errStr) > 0 { + // Find the first space to extract the prefix + spaceIdx := 0 + for i, c := range errStr { + if c == ' ' { + spaceIdx = i + break + } + } + if spaceIdx == 0 { + spaceIdx = len(errStr) + } + prefix := errStr[:spaceIdx] + isUppercase := true + for _, c := range prefix { + if c < 'A' || c > 'Z' { + isUppercase = false + break + } + } + if isUppercase && len(prefix) > 0 { + return prefix, prefix, false } } + return "UNKNOWN", "UNKNOWN", true +} + +func (c *baseClient) assertUnstableCommand(cmd Cmder) (bool, error) { + switch cmd.(type) { + case *AggregateCmd, *FTInfoCmd, *FTSpellCheckCmd, *FTSearchCmd, *FTSynDumpCmd: + if c.opt.UnstableResp3 { + return true, nil + } else { + return false, fmt.Errorf("RESP3 responses for this command are disabled because they may still change. Please set the flag UnstableResp3. 
See the README and the release notes for guidance") + } + default: + return false, nil + } +} + +func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, *pool.Conn, error) { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return false, nil, err + } + } + + var usedConn *pool.Conn retryTimeout := uint32(0) if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + usedConn = cn + // Process any pending push notifications before executing the command + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before command: %v", err) + } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmd(wr, cmd) }); err != nil { atomic.StoreUint32(&retryTimeout, 1) return err } - - if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil { + readReplyFunc := cmd.readReply + // Apply unstable RESP3 search module. 
+ if c.opt.Protocol != 2 { + useRawReply, err := c.assertUnstableCommand(cmd) + if err != nil { + return err + } + if useRawReply { + readReplyFunc = cmd.readRawReply + } + } + if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), func(rd *proto.Reader) error { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } + return readReplyFunc(rd) + }); err != nil { if cmd.readTimeout() == nil { atomic.StoreUint32(&retryTimeout, 1) } else { @@ -433,10 +861,10 @@ func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool return nil }); err != nil { retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1) - return retry, err + return retry, usedConn, err } - return false, nil + return false, usedConn, nil } func (c *baseClient) retryBackoff(attempt int) time.Duration { @@ -454,19 +882,90 @@ func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { return c.opt.ReadTimeout } +// context returns the context for the current connection. +// If the context timeout is enabled, it returns the original context. +// Otherwise, it returns a new background context. +func (c *baseClient) context(ctx context.Context) context.Context { + if c.opt.ContextTimeoutEnabled { + return ctx + } + return context.Background() +} + +// createInitConnFunc creates a connection initialization function that can be used for reconnections. +func (c *baseClient) createInitConnFunc() func(context.Context, *pool.Conn) error { + return func(ctx context.Context, cn *pool.Conn) error { + return c.initConn(ctx, cn) + } +} + +// enableMaintNotificationsUpgrades initializes the maintnotifications upgrade manager and pool hook. +// This function is called during client initialization. 
+// will register push notification handlers for all maintenance upgrade events. +// will start background workers for handoff processing in the pool hook. +func (c *baseClient) enableMaintNotificationsUpgrades() error { + // Create client adapter + clientAdapterInstance := newClientAdapter(c) + + // Create maintnotifications manager directly + manager, err := maintnotifications.NewManager(clientAdapterInstance, c.connPool, c.opt.MaintNotificationsConfig) + if err != nil { + return err + } + // Set the manager reference and initialize pool hook + c.maintNotificationsManagerLock.Lock() + c.maintNotificationsManager = manager + c.maintNotificationsManagerLock.Unlock() + + // Initialize pool hook (safe to call without lock since manager is now set) + manager.InitPoolHook(c.dialHook) + return nil +} + +func (c *baseClient) disableMaintNotificationsUpgrades() error { + c.maintNotificationsManagerLock.Lock() + defer c.maintNotificationsManagerLock.Unlock() + + // Close the maintnotifications manager + if c.maintNotificationsManager != nil { + // Closing the manager will also shutdown the pool hook + // and remove it from the pool + c.maintNotificationsManager.Close() + c.maintNotificationsManager = nil + } + return nil +} + // Close closes the client, releasing any open resources. // // It is rare to Close a Client, as the Client is meant to be // long-lived and shared between many goroutines. 
func (c *baseClient) Close() error { var firstErr error + + // Close maintnotifications manager first + if err := c.disableMaintNotificationsUpgrades(); err != nil { + firstErr = err + } + if c.onClose != nil { - if err := c.onClose(); err != nil { + if err := c.onClose(); err != nil && firstErr == nil { firstErr = err } } - if err := c.connPool.Close(); err != nil && firstErr == nil { - firstErr = err + + // Unregister pools from OTel before closing them + otel.UnregisterPools(c.connPool, c.pubSubPool) + + if c.connPool != nil { + if err := c.connPool.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + if c.pubSubPool != nil { + if err := c.pubSubPool.Close(); err != nil && firstErr == nil { + firstErr = err + } } return firstErr } @@ -476,14 +975,14 @@ func (c *baseClient) getAddr() string { } func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error { - if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil { + if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds, "PIPELINE"); err != nil { return err } return cmdsFirstErr(cmds) } func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { - if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil { + if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds, "MULTI"); err != nil { return err } return cmdsFirstErr(cmds) @@ -492,13 +991,27 @@ func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error) func (c *baseClient) generalProcessPipeline( - ctx context.Context, cmds []Cmder, p pipelineProcessor, + ctx context.Context, cmds []Cmder, p pipelineProcessor, operationName string, ) error { + // Only call time.Now() if pipeline operation duration callback is set to avoid overhead + var operationStart time.Time + pipelineOpDurationCallback := 
otel.GetPipelineOperationDurationCallback() + if pipelineOpDurationCallback != nil { + operationStart = time.Now() + } + var lastConn *pool.Conn + totalAttempts := 0 + var lastErr error for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + totalAttempts++ if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { setCmdsErr(cmds, err) + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, operationName, len(cmds), totalAttempts, err, lastConn, c.opt.DB) + } return err } } @@ -506,20 +1019,56 @@ func (c *baseClient) generalProcessPipeline( // Enable retries by default to retry dial errors returned by withConn. canRetry := true lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + lastConn = cn + // Process any pending push notifications before executing the pipeline + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before processing pipeline: %v", err) + } var err error canRetry, err = p(ctx, cn, cmds) return err }) if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) { + // The error should be set here only when failing to obtain the conn. 
+ if !isRedisError(lastErr) { + setCmdsErr(cmds, lastErr) + } + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, operationName, len(cmds), totalAttempts, lastErr, lastConn, c.opt.DB) + } + + if lastErr != nil { + if errorCallback := pool.GetMetricErrorCallback(); errorCallback != nil { + errorType, statusCode, isInternal := classifyCommandError(lastErr) + errorCallback(ctx, errorType, lastConn, statusCode, isInternal, totalAttempts-1) + } + } return lastErr } } + + if pipelineOpDurationCallback != nil { + operationDuration := time.Since(operationStart) + pipelineOpDurationCallback(ctx, operationDuration, operationName, len(cmds), totalAttempts, lastErr, lastConn, c.opt.DB) + } + + if errorCallback := pool.GetMetricErrorCallback(); errorCallback != nil { + errorType, statusCode, isInternal := classifyCommandError(lastErr) + errorCallback(ctx, errorType, lastConn, statusCode, isInternal, totalAttempts-1) + } + return lastErr } func (c *baseClient) pipelineProcessCmds( ctx context.Context, cn *pool.Conn, cmds []Cmder, ) (bool, error) { + // Process any pending push notifications before executing the pipeline + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before writing pipeline: %v", err) + } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { @@ -528,7 +1077,8 @@ func (c *baseClient) pipelineProcessCmds( } if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { - return pipelineReadCmds(rd, cmds) + // read all replies + return c.pipelineReadCmds(ctx, cn, rd, cmds) }); err != nil { return true, err } @@ -536,8 +1086,12 @@ func (c *baseClient) pipelineProcessCmds( return false, nil } -func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { +func (c *baseClient) 
pipelineReadCmds(ctx context.Context, cn *pool.Conn, rd *proto.Reader, cmds []Cmder) error { for i, cmd := range cmds { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } err := cmd.readReply(rd) cmd.SetErr(err) if err != nil && !isRedisError(err) { @@ -552,6 +1106,11 @@ func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { func (c *baseClient) txPipelineProcessCmds( ctx context.Context, cn *pool.Conn, cmds []Cmder, ) (bool, error) { + // Process any pending push notifications before executing the transaction pipeline + if err := c.processPushNotifications(ctx, cn); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before transaction: %v", err) + } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { @@ -564,12 +1123,13 @@ func (c *baseClient) txPipelineProcessCmds( // Trim multi and exec. trimmedCmds := cmds[1 : len(cmds)-1] - if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil { + if err := c.txPipelineReadQueued(ctx, cn, rd, statusCmd, trimmedCmds); err != nil { setCmdsErr(cmds, err) return err } - return pipelineReadCmds(rd, trimmedCmds) + // Read replies. + return c.pipelineReadCmds(ctx, cn, rd, trimmedCmds) }); err != nil { return false, err } @@ -577,19 +1137,36 @@ func (c *baseClient) txPipelineProcessCmds( return false, nil } -func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { +// txPipelineReadQueued reads queued replies from the Redis server. +// It returns an error if the server returns an error or if the number of replies does not match the number of commands. 
+func (c *baseClient) txPipelineReadQueued(ctx context.Context, cn *pool.Conn, rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } // Parse +OK. if err := statusCmd.readReply(rd); err != nil { return err } // Parse +QUEUED. - for range cmds { - if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) { - return err + for _, cmd := range cmds { + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } + if err := statusCmd.readReply(rd); err != nil { + cmd.SetErr(err) + if !isRedisError(err) { + return err + } } } + // To be sure there are no buffered push notifications, we process them before reading the reply + if err := c.processPendingPushNotificationWithReader(ctx, cn, rd); err != nil { + internal.Logger.Printf(ctx, "push: error processing pending notifications before reading reply: %v", err) + } // Parse number of replies. line, err := rd.ReadLine() if err != nil { @@ -606,13 +1183,6 @@ func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) return nil } -func (c *baseClient) context(ctx context.Context) context.Context { - if c.opt.ContextTimeoutEnabled { - return ctx - } - return context.Background() -} - //------------------------------------------------------------------------------ // Client is a Redis client representing a pool of zero or more underlying connections. 
@@ -623,20 +1193,77 @@ func (c *baseClient) context(ctx context.Context) context.Context { type Client struct { *baseClient cmdable - hooksMixin } // NewClient returns a client to the Redis Server specified by Options. func NewClient(opt *Options) *Client { + if opt == nil { + panic("redis: NewClient nil options") + } + // clone to not share options with the caller + opt = opt.clone() opt.init() + // Push notifications are always enabled for RESP3 (cannot be disabled) + c := Client{ baseClient: &baseClient{ opt: opt, }, } c.init() - c.connPool = newConnPool(opt, c.dialHook) + + // Initialize push notification processor using shared helper + // Use void processor for RESP2 connections (push notifications not available) + c.pushProcessor = initializePushProcessor(opt) + // set opt push processor for child clients + c.opt.PushNotificationProcessor = c.pushProcessor + + // Generate unique pool names for metrics + uniqueID := generateUniqueID() + mainPoolName := opt.Addr + "_" + uniqueID + pubsubPoolName := opt.Addr + "_" + uniqueID + "_pubsub" + + // Create connection pools + var err error + c.connPool, err = newConnPool(opt, c.dialHook, mainPoolName) + if err != nil { + panic(fmt.Errorf("redis: failed to create connection pool: %w", err)) + } + c.pubSubPool, err = newPubSubPool(opt, c.dialHook, pubsubPoolName) + if err != nil { + panic(fmt.Errorf("redis: failed to create pubsub pool: %w", err)) + } + + if opt.StreamingCredentialsProvider != nil { + c.streamingCredentialsManager = streaming.NewManager(c.connPool, c.opt.PoolTimeout) + c.connPool.AddPoolHook(c.streamingCredentialsManager.PoolHook()) + } + + // Initialize maintnotifications first if enabled and protocol is RESP3 + if opt.MaintNotificationsConfig != nil && opt.MaintNotificationsConfig.Mode != maintnotifications.ModeDisabled && opt.Protocol == 3 { + err := c.enableMaintNotificationsUpgrades() + if err != nil { + internal.Logger.Printf(context.Background(), "failed to initialize maintnotifications: %v", err) 
+ if opt.MaintNotificationsConfig.Mode == maintnotifications.ModeEnabled { + /* + Design decision: panic here to fail fast if maintnotifications cannot be enabled when explicitly requested. + We choose to panic instead of returning an error to avoid breaking the existing client API, which does not expect + an error from NewClient. This ensures that misconfiguration or critical initialization failures are surfaced + immediately, rather than allowing the client to continue in a partially initialized or inconsistent state. + Clients relying on maintnotifications should be aware that initialization errors will cause a panic, and should + handle this accordingly (e.g., via recover or by validating configuration before calling NewClient). + This approach is only used when MaintNotificationsConfig.Mode is MaintNotificationsEnabled, indicating that maintnotifications + upgrades are required for correct operation. In other modes, initialization failures are logged but do not panic. + */ + panic(fmt.Errorf("failed to enable maintnotifications: %w", err)) + } + } + } + + // Register pools with OTel recorder if it supports pool registration + // This allows async gauge metrics to pull stats from pools periodically + otel.RegisterPools(c.connPool, c.pubSubPool, opt.Addr) return &c } @@ -659,14 +1286,7 @@ func (c *Client) WithTimeout(timeout time.Duration) *Client { } func (c *Client) Conn() *Conn { - return newConn(c.opt, pool.NewStickyConnPool(c.connPool)) -} - -// Do create a Cmd from the args and processes the cmd. -func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) - _ = c.Process(ctx, cmd) - return cmd + return newConn(c.opt, pool.NewStickyConnPool(c.connPool), &c.hooksMixin) } func (c *Client) Process(ctx context.Context, cmd Cmder) error { @@ -680,11 +1300,61 @@ func (c *Client) Options() *Options { return c.opt } +// NodeAddress returns the address of the Redis node as reported by the server. 
+// For cluster clients, this is the endpoint from CLUSTER SLOTS before any transformation +// (e.g., loopback replacement). For standalone clients, this defaults to Addr. +// +// This is useful for matching the source field in maintenance notifications +// (e.g. SMIGRATED). +func (c *Client) NodeAddress() string { + return c.opt.NodeAddress +} + +// GetMaintNotificationsManager returns the maintnotifications manager instance for monitoring and control. +// Returns nil if maintnotifications are not enabled. +func (c *Client) GetMaintNotificationsManager() *maintnotifications.Manager { + c.maintNotificationsManagerLock.RLock() + defer c.maintNotificationsManagerLock.RUnlock() + return c.maintNotificationsManager +} + +// initializePushProcessor initializes the push notification processor for any client type. +// This is a shared helper to avoid duplication across NewClient, NewFailoverClient, and NewSentinelClient. +func initializePushProcessor(opt *Options) push.NotificationProcessor { + // Always use custom processor if provided + if opt.PushNotificationProcessor != nil { + return opt.PushNotificationProcessor + } + + // Push notifications are always enabled for RESP3, disabled for RESP2 + if opt.Protocol == 3 { + // Create default processor for RESP3 connections + return NewPushNotificationProcessor() + } + + // Create void processor for RESP2 connections (push notifications not available) + return NewVoidPushNotificationProcessor() +} + +// RegisterPushNotificationHandler registers a handler for a specific push notification name. +// Returns an error if a handler is already registered for this push notification name. +// If protected is true, the handler cannot be unregistered. 
+func (c *Client) RegisterPushNotificationHandler(pushNotificationName string, handler push.NotificationHandler, protected bool) error { + return c.pushProcessor.RegisterHandler(pushNotificationName, handler, protected) +} + +// GetPushNotificationHandler returns the handler for a specific push notification name. +// Returns nil if no handler is registered for the given name. +func (c *Client) GetPushNotificationHandler(pushNotificationName string) push.NotificationHandler { + return c.pushProcessor.GetHandler(pushNotificationName) +} + type PoolStats pool.Stats // PoolStats returns connection pool stats. func (c *Client) PoolStats() *PoolStats { stats := c.connPool.Stats() + stats.PubSubStats = *(c.pubSubPool.Stats()) return (*PoolStats)(stats) } @@ -719,13 +1389,31 @@ func (c *Client) TxPipeline() Pipeliner { func (c *Client) pubSub() *PubSub { pubsub := &PubSub{ opt: c.opt, - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - return c.newConn(ctx) + newConn: func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) { + cn, err := c.pubSubPool.NewConn(ctx, c.opt.Network, addr, channels) + if err != nil { + return nil, err + } + // will return nil if already initialized + err = c.initConn(ctx, cn) + if err != nil { + _ = cn.Close() + return nil, err + } + // Track connection in PubSubPool + c.pubSubPool.TrackConn(cn) + return cn, nil }, - closeConn: c.connPool.CloseConn, + closeConn: func(cn *pool.Conn) error { + // Untrack connection from PubSubPool + c.pubSubPool.UntrackConn(cn) + _ = cn.Close() + return nil + }, + pushProcessor: c.pushProcessor, } pubsub.init() + return pubsub } @@ -792,10 +1480,12 @@ type Conn struct { baseClient cmdable statefulCmdable - hooksMixin } -func newConn(opt *Options, connPool pool.Pooler) *Conn { +// newConn is a helper func to create a new Conn instance. +// the Conn instance is not thread-safe and should not be shared between goroutines. 
+// the parentHooks will be cloned, no need to clone before passing it. +func newConn(opt *Options, connPool pool.Pooler, parentHooks *hooksMixin) *Conn { c := Conn{ baseClient: baseClient{ opt: opt, @@ -803,6 +1493,14 @@ func newConn(opt *Options, connPool pool.Pooler) *Conn { }, } + if parentHooks != nil { + c.hooksMixin = parentHooks.clone() + } + + // Initialize push notification processor using shared helper + // Use void processor for RESP2 connections (push notifications not available) + c.pushProcessor = initializePushProcessor(opt) + c.cmdable = c.Process c.statefulCmdable = c.Process c.initHooks(hooks{ @@ -821,6 +1519,13 @@ func (c *Conn) Process(ctx context.Context, cmd Cmder) error { return err } +// RegisterPushNotificationHandler registers a handler for a specific push notification name. +// Returns an error if a handler is already registered for this push notification name. +// If protected is true, the handler cannot be unregistered. +func (c *Conn) RegisterPushNotificationHandler(pushNotificationName string, handler push.NotificationHandler, protected bool) error { + return c.pushProcessor.RegisterHandler(pushNotificationName, handler, protected) +} + func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.Pipeline().Pipelined(ctx, fn) } @@ -848,3 +1553,78 @@ func (c *Conn) TxPipeline() Pipeliner { pipe.init() return &pipe } + +// processPushNotifications processes all pending push notifications on a connection +// This ensures that cluster topology changes are handled immediately before the connection is used +// This method should be called by the client before using WithReader for command execution +// +// Performance optimization: Skip the expensive MaybeHasData() syscall if a health check +// was performed recently (within 5 seconds). The health check already verified the connection +// is healthy and checked for unexpected data (push notifications). 
+func (c *baseClient) processPushNotifications(ctx context.Context, cn *pool.Conn) error { + // Only process push notifications for RESP3 connections with a processor + if c.opt.Protocol != 3 || c.pushProcessor == nil { + return nil + } + + // Performance optimization: Skip MaybeHasData() syscall if health check was recent + // If the connection was health-checked within the last 5 seconds, we can skip the + // expensive syscall since the health check already verified no unexpected data. + // This is safe because: + // 0. lastHealthCheckNs is set in pool/conn.go:putConn() after a successful health check + // 1. Health check (connCheck) uses the same syscall (Recvfrom with MSG_PEEK) + // 2. If push notifications arrived, they would have been detected by health check + // 3. 5 seconds is short enough that connection state is still fresh + // 4. Push notifications will be processed by the next WithReader call + // used it is set on getConn, so we should use another timer (lastPutAt?) + lastHealthCheckNs := cn.LastPutAtNs() + if lastHealthCheckNs > 0 { + // Use pool's cached time to avoid expensive time.Now() syscall + nowNs := pool.GetCachedTimeNs() + if nowNs-lastHealthCheckNs < int64(5*time.Second) { + // Recent health check confirmed no unexpected data, skip the syscall + return nil + } + } + + // Check if there is any data to read before processing + // This is an optimization on UNIX systems where MaybeHasData is a syscall + // On Windows, MaybeHasData always returns true, so this check is a no-op + if !cn.MaybeHasData() { + return nil + } + + // Use WithReader to access the reader and process push notifications + // This is critical for maintnotifications to work properly + // NOTE: almost no timeouts are set for this read, so it should not block + // longer than necessary, 10us should be plenty of time to read if there are any push notifications + // on the socket. 
+ return cn.WithReader(ctx, 10*time.Microsecond, func(rd *proto.Reader) error { + // Create handler context with client, connection pool, and connection information + handlerCtx := c.pushNotificationHandlerContext(cn) + return c.pushProcessor.ProcessPendingNotifications(ctx, handlerCtx, rd) + }) +} + +// processPendingPushNotificationWithReader processes all pending push notifications on a connection +// This method should be called by the client in WithReader before reading the reply +func (c *baseClient) processPendingPushNotificationWithReader(ctx context.Context, cn *pool.Conn, rd *proto.Reader) error { + // if we have the reader, we don't need to check for data on the socket, we are waiting + // for either a reply or a push notification, so we can block until we get a reply or reach the timeout + if c.opt.Protocol != 3 || c.pushProcessor == nil { + return nil + } + + // Create handler context with client, connection pool, and connection information + handlerCtx := c.pushNotificationHandlerContext(cn) + return c.pushProcessor.ProcessPendingNotifications(ctx, handlerCtx, rd) +} + +// pushNotificationHandlerContext creates a handler context for push notification processing +func (c *baseClient) pushNotificationHandlerContext(cn *pool.Conn) push.NotificationHandlerContext { + return push.NotificationHandlerContext{ + Client: c, + ConnPool: c.connPool, + Conn: cn, // Wrap in adapter for easier interface access + } +} diff --git a/vendor/github.com/redis/go-redis/v9/result.go b/vendor/github.com/redis/go-redis/v9/result.go index cfd4cf92..3e0d0a13 100644 --- a/vendor/github.com/redis/go-redis/v9/result.go +++ b/vendor/github.com/redis/go-redis/v9/result.go @@ -82,6 +82,14 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { return &cmd } +// NewFloatSliceResult returns a FloatSliceCmd initialised with val and err for testing. 
+func NewFloatSliceResult(val []float64, err error) *FloatSliceCmd { + var cmd FloatSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + // NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing. func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd { var cmd MapStringStringCmd diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go index 4ae00542..d9220ddb 100644 --- a/vendor/github.com/redis/go-redis/v9/ring.go +++ b/vendor/github.com/redis/go-redis/v9/ring.go @@ -13,15 +13,23 @@ import ( "github.com/cespare/xxhash/v2" "github.com/dgryski/go-rendezvous" //nolint + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/internal/proto" "github.com/redis/go-redis/v9/internal/rand" ) var errRingShardsDown = errors.New("redis: all ring shards are down") +// defaultHeartbeatFn is the default function used to check the shard liveness +var defaultHeartbeatFn = func(ctx context.Context, client *Client) bool { + err := client.Ping(ctx).Err() + return err == nil || err == pool.ErrPoolTimeout +} + //------------------------------------------------------------------------------ type ConsistentHash interface { @@ -54,10 +62,14 @@ type RingOptions struct { // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. ClientName string - // Frequency of PING commands sent to check shards availability. + // Frequency of executing HeartbeatFn to check shards availability. // Shard is considered down after 3 subsequent failed checks. 
HeartbeatFrequency time.Duration + // A function used to check the shard liveness + // if not set, defaults to defaultHeartbeatFn + HeartbeatFn func(ctx context.Context, client *Client) bool + // NewConsistentHash returns a consistent hash that is used // to distribute keys across the shards. // @@ -73,13 +85,41 @@ type RingOptions struct { Protocol int Username string Password string - DB int + // CredentialsProvider allows the username and password to be updated + // before reconnecting. It should return the current username and password. + CredentialsProvider func() (username string, password string) + + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. + StreamingCredentialsProvider auth.StreamingCredentialsProvider + DB int MaxRetries int MinRetryBackoff time.Duration MaxRetryBackoff time.Duration - DialTimeout time.Duration + DialTimeout time.Duration + + // DialerRetries is the maximum number of retry attempts when dialing fails. + // + // default: 5 + DialerRetries int + + // DialerRetryTimeout is the backoff duration between retry attempts. 
+ // + // default: 100 milliseconds + DialerRetryTimeout time.Duration + ReadTimeout time.Duration WriteTimeout time.Duration ContextTimeoutEnabled bool @@ -87,19 +127,45 @@ type RingOptions struct { // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). PoolFIFO bool - PoolSize int - PoolTimeout time.Duration - MinIdleConns int - MaxIdleConns int - MaxActiveConns int - ConnMaxIdleTime time.Duration - ConnMaxLifetime time.Duration + PoolSize int + PoolTimeout time.Duration + MinIdleConns int + MaxIdleConns int + MaxActiveConns int + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration + ConnMaxLifetimeJitter time.Duration + + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int TLSConfig *tls.Config Limiter Limiter + // DisableIndentity - Disable set-lib on connect. + // + // default: false + // + // Deprecated: Use DisableIdentity instead. DisableIndentity bool - IdentitySuffix string + + // DisableIdentity is used to disable CLIENT SETINFO command on connect. 
+ // + // default: false + DisableIdentity bool + IdentitySuffix string + UnstableResp3 bool } func (opt *RingOptions) init() { @@ -113,13 +179,18 @@ func (opt *RingOptions) init() { opt.HeartbeatFrequency = 500 * time.Millisecond } + if opt.HeartbeatFn == nil { + opt.HeartbeatFn = defaultHeartbeatFn + } + if opt.NewConsistentHash == nil { opt.NewConsistentHash = newRendezvous } - if opt.MaxRetries == -1 { + switch opt.MaxRetries { + case -1: opt.MaxRetries = 0 - } else if opt.MaxRetries == 0 { + case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { @@ -134,6 +205,13 @@ func (opt *RingOptions) init() { case 0: opt.MaxRetryBackoff = 512 * time.Millisecond } + + if opt.ReadBufferSize == 0 { + opt.ReadBufferSize = proto.DefaultBufferSize + } + if opt.WriteBufferSize == 0 { + opt.WriteBufferSize = proto.DefaultBufferSize + } } func (opt *RingOptions) clientOptions() *Options { @@ -142,32 +220,43 @@ func (opt *RingOptions) clientOptions() *Options { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, - DB: opt.DB, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, + DB: opt.DB, MaxRetries: -1, DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, ReadTimeout: opt.ReadTimeout, WriteTimeout: opt.WriteTimeout, ContextTimeoutEnabled: opt.ContextTimeoutEnabled, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + 
MaxIdleConns: opt.MaxIdleConns, + MaxActiveConns: opt.MaxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ConnMaxLifetimeJitter: opt.ConnMaxLifetimeJitter, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, TLSConfig: opt.TLSConfig, Limiter: opt.Limiter, + DisableIdentity: opt.DisableIdentity, DisableIndentity: opt.DisableIndentity, - IdentitySuffix: opt.IdentitySuffix, + + IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, } } @@ -334,16 +423,16 @@ func (c *ringSharding) newRingShards( return } +// Warning: External exposure of `c.shards.list` may cause data races. +// So keep internal or implement deep copy if exposed. func (c *ringSharding) List() []*ringShard { - var list []*ringShard - c.mu.RLock() - if !c.closed { - list = c.shards.list - } - c.mu.RUnlock() + defer c.mu.RUnlock() - return list + if c.closed { + return nil + } + return c.shards.list } func (c *ringSharding) Hash(key string) string { @@ -390,7 +479,12 @@ func (c *ringSharding) GetByName(shardName string) (*ringShard, error) { c.mu.RLock() defer c.mu.RUnlock() - return c.shards.m[shardName], nil + shard, ok := c.shards.m[shardName] + if !ok { + return nil, errors.New("redis: the shard is not in the ring") + } + + return shard, nil } func (c *ringSharding) Random() (*ringShard, error) { @@ -407,9 +501,9 @@ func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) { case <-ticker.C: var rebalance bool + // note: `c.List()` return a shadow copy of `[]*ringShard`. 
for _, shard := range c.List() { - err := shard.Client.Ping(ctx).Err() - isUp := err == nil || err == pool.ErrPoolTimeout + isUp := c.opt.HeartbeatFn(ctx, shard.Client) if shard.Vote(isUp) { internal.Logger.Printf(ctx, "ring shard state changed: %s", shard) rebalance = true @@ -507,6 +601,9 @@ type Ring struct { } func NewRing(opt *RingOptions) *Ring { + if opt == nil { + panic("redis: NewRing nil options") + } opt.init() hbCtx, hbCancel := context.WithCancel(context.Background()) @@ -539,13 +636,6 @@ func (c *Ring) SetAddrs(addrs map[string]string) { c.sharding.SetAddrs(addrs) } -// Do create a Cmd from the args and processes the cmd. -func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) - _ = c.Process(ctx, cmd) - return cmd -} - func (c *Ring) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) @@ -563,6 +653,7 @@ func (c *Ring) retryBackoff(attempt int) time.Duration { // PoolStats returns accumulated connection pool stats. func (c *Ring) PoolStats() *PoolStats { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var acc PoolStats for _, shard := range shards { @@ -632,6 +723,7 @@ func (c *Ring) ForEachShard( ctx context.Context, fn func(ctx context.Context, client *Client) error, ) error { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var wg sync.WaitGroup errCh := make(chan error, 1) @@ -663,6 +755,7 @@ func (c *Ring) ForEachShard( } func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // note: `c.List()` return a shadow copy of `[]*ringShard`. 
shards := c.sharding.List() var firstErr error for _, shard := range shards { @@ -680,7 +773,7 @@ func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { return nil, firstErr } -func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) { +func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) { pos := cmdFirstKeyPos(cmd) if pos == 0 { return c.sharding.Random() @@ -698,7 +791,7 @@ func (c *Ring) process(ctx context.Context, cmd Cmder) error { } } - shard, err := c.cmdShard(ctx, cmd) + shard, err := c.cmdShard(cmd) if err != nil { return err } @@ -757,6 +850,8 @@ func (c *Ring) generalProcessPipeline( } var wg sync.WaitGroup + errs := make(chan error, len(cmdsMap)) + for hash, cmds := range cmdsMap { wg.Add(1) go func(hash string, cmds []Cmder) { @@ -769,16 +864,24 @@ func (c *Ring) generalProcessPipeline( return } + hook := shard.Client.processPipelineHook if tx { cmds = wrapMultiExec(ctx, cmds) - _ = shard.Client.processTxPipelineHook(ctx, cmds) - } else { - _ = shard.Client.processPipelineHook(ctx, cmds) + hook = shard.Client.processTxPipelineHook + } + + if err = hook(ctx, cmds); err != nil { + errs <- err } }(hash, cmds) } wg.Wait() + close(errs) + + if err := <-errs; err != nil { + return err + } return cmdsFirstErr(cmds) } @@ -791,7 +894,7 @@ func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) er for _, key := range keys { if key != "" { - shard, err := c.sharding.GetByKey(hashtag.Key(key)) + shard, err := c.sharding.GetByKey(key) if err != nil { return err } @@ -825,3 +928,26 @@ func (c *Ring) Close() error { return c.sharding.Close() } + +// GetShardClients returns a list of all shard clients in the ring. +// This can be used to create dedicated connections (e.g., PubSub) for each shard. 
+func (c *Ring) GetShardClients() []*Client { + shards := c.sharding.List() + clients := make([]*Client, 0, len(shards)) + for _, shard := range shards { + if shard.IsUp() { + clients = append(clients, shard.Client) + } + } + return clients +} + +// GetShardClientForKey returns the shard client that would handle the given key. +// This can be used to determine which shard a particular key/channel would be routed to. +func (c *Ring) GetShardClientForKey(key string) (*Client, error) { + shard, err := c.sharding.GetByKey(key) + if err != nil { + return nil, err + } + return shard.Client, nil +} diff --git a/vendor/github.com/redis/go-redis/v9/search_builders.go b/vendor/github.com/redis/go-redis/v9/search_builders.go new file mode 100644 index 00000000..91f06340 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/search_builders.go @@ -0,0 +1,825 @@ +package redis + +import ( + "context" +) + +// ---------------------- +// Search Module Builders +// ---------------------- + +// SearchBuilder provides a fluent API for FT.SEARCH +// (see original FTSearchOptions for all options). +// EXPERIMENTAL: this API is subject to change, use with caution. +type SearchBuilder struct { + c *Client + ctx context.Context + index string + query string + options *FTSearchOptions +} + +// NewSearchBuilder creates a new SearchBuilder for FT.SEARCH commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewSearchBuilder(ctx context.Context, index, query string) *SearchBuilder { + b := &SearchBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTSearchOptions{LimitOffset: -1}} + return b +} + +// WithScores includes WITHSCORES. +func (b *SearchBuilder) WithScores() *SearchBuilder { + b.options.WithScores = true + return b +} + +// NoContent includes NOCONTENT. +func (b *SearchBuilder) NoContent() *SearchBuilder { b.options.NoContent = true; return b } + +// Verbatim includes VERBATIM. 
+func (b *SearchBuilder) Verbatim() *SearchBuilder { b.options.Verbatim = true; return b } + +// NoStopWords includes NOSTOPWORDS. +func (b *SearchBuilder) NoStopWords() *SearchBuilder { b.options.NoStopWords = true; return b } + +// WithPayloads includes WITHPAYLOADS. +func (b *SearchBuilder) WithPayloads() *SearchBuilder { + b.options.WithPayloads = true + return b +} + +// WithSortKeys includes WITHSORTKEYS. +func (b *SearchBuilder) WithSortKeys() *SearchBuilder { + b.options.WithSortKeys = true + return b +} + +// Filter adds a FILTER clause: FILTER . +func (b *SearchBuilder) Filter(field string, min, max interface{}) *SearchBuilder { + b.options.Filters = append(b.options.Filters, FTSearchFilter{ + FieldName: field, + Min: min, + Max: max, + }) + return b +} + +// GeoFilter adds a GEOFILTER clause: GEOFILTER . +func (b *SearchBuilder) GeoFilter(field string, lon, lat, radius float64, unit string) *SearchBuilder { + b.options.GeoFilter = append(b.options.GeoFilter, FTSearchGeoFilter{ + FieldName: field, + Longitude: lon, + Latitude: lat, + Radius: radius, + Unit: unit, + }) + return b +} + +// InKeys restricts the search to the given keys. +func (b *SearchBuilder) InKeys(keys ...interface{}) *SearchBuilder { + b.options.InKeys = append(b.options.InKeys, keys...) + return b +} + +// InFields restricts the search to the given fields. +func (b *SearchBuilder) InFields(fields ...interface{}) *SearchBuilder { + b.options.InFields = append(b.options.InFields, fields...) + return b +} + +// ReturnFields adds simple RETURN ... +func (b *SearchBuilder) ReturnFields(fields ...string) *SearchBuilder { + for _, f := range fields { + b.options.Return = append(b.options.Return, FTSearchReturn{FieldName: f}) + } + return b +} + +// ReturnAs adds RETURN AS . +func (b *SearchBuilder) ReturnAs(field, alias string) *SearchBuilder { + b.options.Return = append(b.options.Return, FTSearchReturn{FieldName: field, As: alias}) + return b +} + +// Slop adds SLOP . 
+func (b *SearchBuilder) Slop(slop int) *SearchBuilder { + b.options.Slop = slop + return b +} + +// Timeout adds TIMEOUT . +func (b *SearchBuilder) Timeout(timeout int) *SearchBuilder { + b.options.Timeout = timeout + return b +} + +// InOrder includes INORDER. +func (b *SearchBuilder) InOrder() *SearchBuilder { + b.options.InOrder = true + return b +} + +// Language sets LANGUAGE . +func (b *SearchBuilder) Language(lang string) *SearchBuilder { + b.options.Language = lang + return b +} + +// Expander sets EXPANDER . +func (b *SearchBuilder) Expander(expander string) *SearchBuilder { + b.options.Expander = expander + return b +} + +// Scorer sets SCORER . +func (b *SearchBuilder) Scorer(scorer string) *SearchBuilder { + b.options.Scorer = scorer + return b +} + +// ExplainScore includes EXPLAINSCORE. +func (b *SearchBuilder) ExplainScore() *SearchBuilder { + b.options.ExplainScore = true + return b +} + +// Payload sets PAYLOAD . +func (b *SearchBuilder) Payload(payload string) *SearchBuilder { + b.options.Payload = payload + return b +} + +// SortBy adds SORTBY ASC|DESC. +func (b *SearchBuilder) SortBy(field string, asc bool) *SearchBuilder { + b.options.SortBy = append(b.options.SortBy, FTSearchSortBy{ + FieldName: field, + Asc: asc, + Desc: !asc, + }) + return b +} + +// WithSortByCount includes WITHCOUNT (when used with SortBy). +func (b *SearchBuilder) WithSortByCount() *SearchBuilder { + b.options.SortByWithCount = true + return b +} + +// Param adds a single PARAMS . +func (b *SearchBuilder) Param(key string, value interface{}) *SearchBuilder { + if b.options.Params == nil { + b.options.Params = make(map[string]interface{}, 1) + } + b.options.Params[key] = value + return b +} + +// ParamsMap adds multiple PARAMS at once. 
+func (b *SearchBuilder) ParamsMap(p map[string]interface{}) *SearchBuilder { + if b.options.Params == nil { + b.options.Params = make(map[string]interface{}, len(p)) + } + for k, v := range p { + b.options.Params[k] = v + } + return b +} + +// Dialect sets DIALECT . +func (b *SearchBuilder) Dialect(version int) *SearchBuilder { + b.options.DialectVersion = version + return b +} + +// Limit sets OFFSET and COUNT. CountOnly uses LIMIT 0 0. +func (b *SearchBuilder) Limit(offset, count int) *SearchBuilder { + b.options.LimitOffset = offset + b.options.Limit = count + return b +} +func (b *SearchBuilder) CountOnly() *SearchBuilder { b.options.CountOnly = true; return b } + +// Run executes FT.SEARCH and returns a typed result. +func (b *SearchBuilder) Run() (FTSearchResult, error) { + cmd := b.c.FTSearchWithArgs(b.ctx, b.index, b.query, b.options) + return cmd.Result() +} + +// ---------------------- +// AggregateBuilder for FT.AGGREGATE +// ---------------------- + +type AggregateBuilder struct { + c *Client + ctx context.Context + index string + query string + options *FTAggregateOptions +} + +// NewAggregateBuilder creates a new AggregateBuilder for FT.AGGREGATE commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewAggregateBuilder(ctx context.Context, index, query string) *AggregateBuilder { + return &AggregateBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTAggregateOptions{LimitOffset: -1}} +} + +// Verbatim includes VERBATIM. +func (b *AggregateBuilder) Verbatim() *AggregateBuilder { b.options.Verbatim = true; return b } + +// AddScores includes ADDSCORES. +func (b *AggregateBuilder) AddScores() *AggregateBuilder { b.options.AddScores = true; return b } + +// Scorer sets SCORER . +func (b *AggregateBuilder) Scorer(s string) *AggregateBuilder { + b.options.Scorer = s + return b +} + +// LoadAll includes LOAD * (mutually exclusive with Load). 
func (b *AggregateBuilder) LoadAll() *AggregateBuilder {
	b.options.LoadAll = true
	return b
}

// Load adds LOAD <field> [AS <alias>].
// You can call it multiple times for multiple fields.
func (b *AggregateBuilder) Load(field string, alias ...string) *AggregateBuilder {
	// each Load entry becomes one element in options.Load;
	// only the first alias (if any) is used
	l := FTAggregateLoad{Field: field}
	if len(alias) > 0 {
		l.As = alias[0]
	}
	b.options.Load = append(b.options.Load, l)
	return b
}

// Timeout sets TIMEOUT <ms>.
func (b *AggregateBuilder) Timeout(ms int) *AggregateBuilder {
	b.options.Timeout = ms
	return b
}

// Apply adds APPLY <expression> [AS <alias>].
func (b *AggregateBuilder) Apply(field string, alias ...string) *AggregateBuilder {
	a := FTAggregateApply{Field: field}
	if len(alias) > 0 {
		a.As = alias[0]
	}
	b.options.Apply = append(b.options.Apply, a)
	return b
}

// GroupBy starts a new GROUPBY clause; attach reducers via Reduce/ReduceAs.
func (b *AggregateBuilder) GroupBy(fields ...interface{}) *AggregateBuilder {
	b.options.GroupBy = append(b.options.GroupBy, FTAggregateGroupBy{
		Fields: fields,
	})
	return b
}

// Reduce adds a REDUCE <fn> [<#args> <args>] clause to the *last* GROUPBY.
// Calling it before any GroupBy is a silent no-op.
func (b *AggregateBuilder) Reduce(fn SearchAggregator, args ...interface{}) *AggregateBuilder {
	if len(b.options.GroupBy) == 0 {
		// no GROUPBY yet — nothing to attach to
		return b
	}
	idx := len(b.options.GroupBy) - 1
	b.options.GroupBy[idx].Reduce = append(b.options.GroupBy[idx].Reduce, FTAggregateReducer{
		Reducer: fn,
		Args:    args,
	})
	return b
}

// ReduceAs does the same but also sets an alias: REDUCE … AS <alias>.
func (b *AggregateBuilder) ReduceAs(fn SearchAggregator, alias string, args ...interface{}) *AggregateBuilder {
	if len(b.options.GroupBy) == 0 {
		return b
	}
	idx := len(b.options.GroupBy) - 1
	b.options.GroupBy[idx].Reduce = append(b.options.GroupBy[idx].Reduce, FTAggregateReducer{
		Reducer: fn,
		Args:    args,
		As:      alias,
	})
	return b
}

// SortBy adds SORTBY <field> ASC|DESC.
func (b *AggregateBuilder) SortBy(field string, asc bool) *AggregateBuilder {
	sb := FTAggregateSortBy{FieldName: field, Asc: asc, Desc: !asc}
	b.options.SortBy = append(b.options.SortBy, sb)
	return b
}

// SortByMax sets MAX <max> (only meaningful if SortBy was called).
func (b *AggregateBuilder) SortByMax(max int) *AggregateBuilder {
	b.options.SortByMax = max
	return b
}

// Filter sets FILTER <expression>.
func (b *AggregateBuilder) Filter(expr string) *AggregateBuilder {
	b.options.Filter = expr
	return b
}

// WithCursor enables WITHCURSOR [COUNT <count>] [MAXIDLE <maxIdle>].
func (b *AggregateBuilder) WithCursor(count, maxIdle int) *AggregateBuilder {
	b.options.WithCursor = true
	if b.options.WithCursorOptions == nil {
		b.options.WithCursorOptions = &FTAggregateWithCursor{}
	}
	b.options.WithCursorOptions.Count = count
	b.options.WithCursorOptions.MaxIdle = maxIdle
	return b
}

// Params adds PARAMS key/value pairs; existing keys are overwritten.
func (b *AggregateBuilder) Params(p map[string]interface{}) *AggregateBuilder {
	if b.options.Params == nil {
		b.options.Params = make(map[string]interface{}, len(p))
	}
	for k, v := range p {
		b.options.Params[k] = v
	}
	return b
}

// Dialect sets DIALECT <version>.
func (b *AggregateBuilder) Dialect(version int) *AggregateBuilder {
	b.options.DialectVersion = version
	return b
}

// Run executes FT.AGGREGATE and returns a typed result.
func (b *AggregateBuilder) Run() (*FTAggregateResult, error) {
	cmd := b.c.FTAggregateWithArgs(b.ctx, b.index, b.query, b.options)
	return cmd.Result()
}

// ----------------------
// CreateIndexBuilder for FT.CREATE
// ----------------------

// CreateIndexBuilder is a builder for FT.CREATE.
// EXPERIMENTAL: this API is subject to change, use with caution.
type CreateIndexBuilder struct {
	c       *Client
	ctx     context.Context
	index   string
	options *FTCreateOptions
	schema  []*FieldSchema
}

// NewCreateIndexBuilder creates a new CreateIndexBuilder for FT.CREATE commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewCreateIndexBuilder(ctx context.Context, index string) *CreateIndexBuilder {
	return &CreateIndexBuilder{c: c, ctx: ctx, index: index, options: &FTCreateOptions{}}
}

// OnHash sets ON HASH.
func (b *CreateIndexBuilder) OnHash() *CreateIndexBuilder { b.options.OnHash = true; return b }

// OnJSON sets ON JSON.
func (b *CreateIndexBuilder) OnJSON() *CreateIndexBuilder { b.options.OnJSON = true; return b }

// Prefix sets PREFIX (key prefixes to index).
func (b *CreateIndexBuilder) Prefix(prefixes ...interface{}) *CreateIndexBuilder {
	b.options.Prefix = prefixes
	return b
}

// Filter sets FILTER.
func (b *CreateIndexBuilder) Filter(filter string) *CreateIndexBuilder {
	b.options.Filter = filter
	return b
}

// DefaultLanguage sets LANGUAGE.
func (b *CreateIndexBuilder) DefaultLanguage(lang string) *CreateIndexBuilder {
	b.options.DefaultLanguage = lang
	return b
}

// LanguageField sets LANGUAGE_FIELD.
func (b *CreateIndexBuilder) LanguageField(field string) *CreateIndexBuilder {
	b.options.LanguageField = field
	return b
}

// Score sets SCORE.
func (b *CreateIndexBuilder) Score(score float64) *CreateIndexBuilder {
	b.options.Score = score
	return b
}

// ScoreField sets SCORE_FIELD.
func (b *CreateIndexBuilder) ScoreField(field string) *CreateIndexBuilder {
	b.options.ScoreField = field
	return b
}

// PayloadField sets PAYLOAD_FIELD.
func (b *CreateIndexBuilder) PayloadField(field string) *CreateIndexBuilder {
	b.options.PayloadField = field
	return b
}

// NoOffsets includes NOOFFSETS.
func (b *CreateIndexBuilder) NoOffsets() *CreateIndexBuilder { b.options.NoOffsets = true; return b }

// Temporary sets TEMPORARY <sec>.
func (b *CreateIndexBuilder) Temporary(sec int) *CreateIndexBuilder {
	b.options.Temporary = sec
	return b
}

// NoHL includes NOHL.
+func (b *CreateIndexBuilder) NoHL() *CreateIndexBuilder { b.options.NoHL = true; return b } + +// NoFields includes NOFIELDS. +func (b *CreateIndexBuilder) NoFields() *CreateIndexBuilder { b.options.NoFields = true; return b } + +// NoFreqs includes NOFREQS. +func (b *CreateIndexBuilder) NoFreqs() *CreateIndexBuilder { b.options.NoFreqs = true; return b } + +// StopWords sets STOPWORDS. +func (b *CreateIndexBuilder) StopWords(words ...interface{}) *CreateIndexBuilder { + b.options.StopWords = words + return b +} + +// SkipInitialScan includes SKIPINITIALSCAN. +func (b *CreateIndexBuilder) SkipInitialScan() *CreateIndexBuilder { + b.options.SkipInitialScan = true + return b +} + +// Schema adds a FieldSchema. +func (b *CreateIndexBuilder) Schema(field *FieldSchema) *CreateIndexBuilder { + b.schema = append(b.schema, field) + return b +} + +// Run executes FT.CREATE and returns the status. +func (b *CreateIndexBuilder) Run() (string, error) { + cmd := b.c.FTCreate(b.ctx, b.index, b.options, b.schema...) + return cmd.Result() +} + +// ---------------------- +// DropIndexBuilder for FT.DROPINDEX +// ---------------------- +// DropIndexBuilder is a builder for FT.DROPINDEX +// EXPERIMENTAL: this API is subject to change, use with caution. +type DropIndexBuilder struct { + c *Client + ctx context.Context + index string + options *FTDropIndexOptions +} + +// NewDropIndexBuilder creates a new DropIndexBuilder for FT.DROPINDEX commands. +// EXPERIMENTAL: this API is subject to change, use with caution. +func (c *Client) NewDropIndexBuilder(ctx context.Context, index string) *DropIndexBuilder { + return &DropIndexBuilder{c: c, ctx: ctx, index: index} +} + +// DeleteRuncs includes DD. +func (b *DropIndexBuilder) DeleteDocs() *DropIndexBuilder { b.options.DeleteDocs = true; return b } + +// Run executes FT.DROPINDEX. 
func (b *DropIndexBuilder) Run() (string, error) {
	cmd := b.c.FTDropIndexWithArgs(b.ctx, b.index, b.options)
	return cmd.Result()
}

// ----------------------
// AliasBuilder for FT.ALIAS* commands
// ----------------------

// AliasBuilder is a builder for the FT.ALIASADD / FT.ALIASDEL / FT.ALIASUPDATE
// commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
type AliasBuilder struct {
	c      *Client
	ctx    context.Context
	alias  string
	index  string
	action string // add|del|update
}

// NewAliasBuilder creates a new AliasBuilder for FT.ALIAS* commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewAliasBuilder(ctx context.Context, alias string) *AliasBuilder {
	return &AliasBuilder{c: c, ctx: ctx, alias: alias}
}

// Action sets the action for the alias builder ("add", "del" or "update").
func (b *AliasBuilder) Action(action string) *AliasBuilder {
	b.action = action
	return b
}

// Add sets the action to "add" and requires an index.
func (b *AliasBuilder) Add(index string) *AliasBuilder {
	b.action = "add"
	b.index = index
	return b
}

// Del sets the action to "del".
func (b *AliasBuilder) Del() *AliasBuilder {
	b.action = "del"
	return b
}

// Update sets the action to "update" and requires an index.
func (b *AliasBuilder) Update(index string) *AliasBuilder {
	b.action = "update"
	b.index = index
	return b
}

// Run executes the configured alias command.
// NOTE(review): an unset/unknown action silently returns ("", nil) — callers
// must configure an action first.
func (b *AliasBuilder) Run() (string, error) {
	switch b.action {
	case "add":
		cmd := b.c.FTAliasAdd(b.ctx, b.index, b.alias)
		return cmd.Result()
	case "del":
		cmd := b.c.FTAliasDel(b.ctx, b.alias)
		return cmd.Result()
	case "update":
		cmd := b.c.FTAliasUpdate(b.ctx, b.index, b.alias)
		return cmd.Result()
	}
	return "", nil
}

// ----------------------
// ExplainBuilder for FT.EXPLAIN
// ----------------------

// ExplainBuilder is a builder for FT.EXPLAIN.
// EXPERIMENTAL: this API is subject to change, use with caution.
type ExplainBuilder struct {
	c       *Client
	ctx     context.Context
	index   string
	query   string
	options *FTExplainOptions
}

// NewExplainBuilder creates a new ExplainBuilder for FT.EXPLAIN commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewExplainBuilder(ctx context.Context, index, query string) *ExplainBuilder {
	return &ExplainBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTExplainOptions{}}
}

// Dialect sets the DIALECT for the explain call.
func (b *ExplainBuilder) Dialect(d string) *ExplainBuilder { b.options.Dialect = d; return b }

// Run executes FT.EXPLAIN and returns the execution plan.
func (b *ExplainBuilder) Run() (string, error) {
	cmd := b.c.FTExplainWithArgs(b.ctx, b.index, b.query, b.options)
	return cmd.Result()
}

// ----------------------
// InfoBuilder for FT.INFO
// ----------------------

// FTInfoBuilder is a builder for FT.INFO.
type FTInfoBuilder struct {
	c     *Client
	ctx   context.Context
	index string
}

// NewSearchInfoBuilder creates a new FTInfoBuilder for FT.INFO commands.
func (c *Client) NewSearchInfoBuilder(ctx context.Context, index string) *FTInfoBuilder {
	return &FTInfoBuilder{c: c, ctx: ctx, index: index}
}

// Run executes FT.INFO and returns detailed index info.
func (b *FTInfoBuilder) Run() (FTInfoResult, error) {
	cmd := b.c.FTInfo(b.ctx, b.index)
	return cmd.Result()
}

// ----------------------
// SpellCheckBuilder for FT.SPELLCHECK
// ----------------------

// SpellCheckBuilder is a builder for FT.SPELLCHECK.
// EXPERIMENTAL: this API is subject to change, use with caution.
type SpellCheckBuilder struct {
	c       *Client
	ctx     context.Context
	index   string
	query   string
	options *FTSpellCheckOptions
}

// NewSpellCheckBuilder creates a new SpellCheckBuilder for FT.SPELLCHECK commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewSpellCheckBuilder(ctx context.Context, index, query string) *SpellCheckBuilder {
	return &SpellCheckBuilder{c: c, ctx: ctx, index: index, query: query, options: &FTSpellCheckOptions{}}
}

// Distance sets MAXDISTANCE (maximum distance for suggestions).
func (b *SpellCheckBuilder) Distance(d int) *SpellCheckBuilder { b.options.Distance = d; return b }

// Terms sets TERMS INCLUDE|EXCLUDE <dictionary> [terms...]; the Terms options
// struct is lazily allocated on first use.
func (b *SpellCheckBuilder) Terms(include bool, dictionary string, terms ...interface{}) *SpellCheckBuilder {
	if b.options.Terms == nil {
		b.options.Terms = &FTSpellCheckTerms{}
	}
	if include {
		b.options.Terms.Inclusion = "INCLUDE"
	} else {
		b.options.Terms.Inclusion = "EXCLUDE"
	}
	b.options.Terms.Dictionary = dictionary
	b.options.Terms.Terms = terms
	return b
}

// Dialect sets the dialect version.
func (b *SpellCheckBuilder) Dialect(d int) *SpellCheckBuilder { b.options.Dialect = d; return b }

// Run executes FT.SPELLCHECK and returns suggestions.
func (b *SpellCheckBuilder) Run() ([]SpellCheckResult, error) {
	cmd := b.c.FTSpellCheckWithArgs(b.ctx, b.index, b.query, b.options)
	return cmd.Result()
}

// ----------------------
// DictBuilder for FT.DICT* commands
// ----------------------

// DictBuilder is a builder for the FT.DICTADD / FT.DICTDEL / FT.DICTDUMP
// commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
type DictBuilder struct {
	c      *Client
	ctx    context.Context
	dict   string
	terms  []interface{}
	action string // add|del|dump
}

// NewDictBuilder creates a new DictBuilder for FT.DICT* commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewDictBuilder(ctx context.Context, dict string) *DictBuilder {
	return &DictBuilder{c: c, ctx: ctx, dict: dict}
}

// Action sets the action for the dictionary builder ("add", "del" or "dump").
func (b *DictBuilder) Action(action string) *DictBuilder {
	b.action = action
	return b
}

// Add sets the action to "add" and requires terms.
func (b *DictBuilder) Add(terms ...interface{}) *DictBuilder {
	b.action = "add"
	b.terms = terms
	return b
}

// Del sets the action to "del" and requires terms.
func (b *DictBuilder) Del(terms ...interface{}) *DictBuilder {
	b.action = "del"
	b.terms = terms
	return b
}

// Dump sets the action to "dump".
func (b *DictBuilder) Dump() *DictBuilder {
	b.action = "dump"
	return b
}

// Run executes the configured dictionary command.
// NOTE(review): an unset/unknown action silently returns (nil, nil).
func (b *DictBuilder) Run() (interface{}, error) {
	switch b.action {
	case "add":
		cmd := b.c.FTDictAdd(b.ctx, b.dict, b.terms...)
		return cmd.Result()
	case "del":
		cmd := b.c.FTDictDel(b.ctx, b.dict, b.terms...)
		return cmd.Result()
	case "dump":
		cmd := b.c.FTDictDump(b.ctx, b.dict)
		return cmd.Result()
	}
	return nil, nil
}

// ----------------------
// TagValsBuilder for FT.TAGVALS
// ----------------------

// TagValsBuilder is a builder for FT.TAGVALS.
// EXPERIMENTAL: this API is subject to change, use with caution.
type TagValsBuilder struct {
	c     *Client
	ctx   context.Context
	index string
	field string
}

// NewTagValsBuilder creates a new TagValsBuilder for FT.TAGVALS commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewTagValsBuilder(ctx context.Context, index, field string) *TagValsBuilder {
	return &TagValsBuilder{c: c, ctx: ctx, index: index, field: field}
}

// Run executes FT.TAGVALS and returns the distinct tag values of the field.
func (b *TagValsBuilder) Run() ([]string, error) {
	cmd := b.c.FTTagVals(b.ctx, b.index, b.field)
	return cmd.Result()
}

// ----------------------
// CursorBuilder for FT.CURSOR*
// ----------------------

// CursorBuilder is a builder for the FT.CURSOR READ / FT.CURSOR DEL commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
type CursorBuilder struct {
	c        *Client
	ctx      context.Context
	index    string
	cursorId int64
	count    int
	action   string // read|del
}

// NewCursorBuilder creates a new CursorBuilder for FT.CURSOR* commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewCursorBuilder(ctx context.Context, index string, cursorId int64) *CursorBuilder {
	return &CursorBuilder{c: c, ctx: ctx, index: index, cursorId: cursorId}
}

// Action sets the action for the cursor builder ("read" or "del").
func (b *CursorBuilder) Action(action string) *CursorBuilder {
	b.action = action
	return b
}

// Read sets the action to "read".
func (b *CursorBuilder) Read() *CursorBuilder {
	b.action = "read"
	return b
}

// Del sets the action to "del".
func (b *CursorBuilder) Del() *CursorBuilder {
	b.action = "del"
	return b
}

// Count sets the COUNT for READ.
func (b *CursorBuilder) Count(count int) *CursorBuilder { b.count = count; return b }

// Run executes the cursor command.
// NOTE(review): cursorId is narrowed from int64 to int for the underlying
// calls; an unset/unknown action silently returns (nil, nil).
func (b *CursorBuilder) Run() (interface{}, error) {
	switch b.action {
	case "read":
		cmd := b.c.FTCursorRead(b.ctx, b.index, int(b.cursorId), b.count)
		return cmd.Result()
	case "del":
		cmd := b.c.FTCursorDel(b.ctx, b.index, int(b.cursorId))
		return cmd.Result()
	}
	return nil, nil
}

// ----------------------
// SynUpdateBuilder for FT.SYNUPDATE
// ----------------------

// SynUpdateBuilder is a builder for FT.SYNUPDATE.
// EXPERIMENTAL: this API is subject to change, use with caution.
type SynUpdateBuilder struct {
	c       *Client
	ctx     context.Context
	index   string
	groupId interface{}
	options *FTSynUpdateOptions
	terms   []interface{}
}

// NewSynUpdateBuilder creates a new SynUpdateBuilder for FT.SYNUPDATE commands.
// EXPERIMENTAL: this API is subject to change, use with caution.
func (c *Client) NewSynUpdateBuilder(ctx context.Context, index string, groupId interface{}) *SynUpdateBuilder {
	return &SynUpdateBuilder{c: c, ctx: ctx, index: index, groupId: groupId, options: &FTSynUpdateOptions{}}
}

// SkipInitialScan includes SKIPINITIALSCAN.
func (b *SynUpdateBuilder) SkipInitialScan() *SynUpdateBuilder {
	b.options.SkipInitialScan = true
	return b
}

// Terms sets the synonyms for the group (replaces any previous value).
func (b *SynUpdateBuilder) Terms(terms ...interface{}) *SynUpdateBuilder { b.terms = terms; return b }

// Run executes FT.SYNUPDATE.
func (b *SynUpdateBuilder) Run() (string, error) {
	cmd := b.c.FTSynUpdateWithArgs(b.ctx, b.index, b.groupId, b.options, b.terms)
	return cmd.Result()
}

// ---- begin file: vendor/github.com/redis/go-redis/v9/search_commands.go ----

package redis

import (
	"context"
	"fmt"
	"strconv"

	"github.com/redis/go-redis/v9/internal"
	"github.com/redis/go-redis/v9/internal/proto"
)

// SearchCmdable bundles the RediSearch (FT.*) command methods.
type SearchCmdable interface {
	FT_List(ctx context.Context) *StringSliceCmd
	FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd
	FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd
	FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd
	FTAliasDel(ctx context.Context, alias string) *StatusCmd
	FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd
	FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd
	FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd
	FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd
	FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd
	FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd
	FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd
	FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd
	FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd
	FTDictDump(ctx context.Context, dict string) *StringSliceCmd
	FTDropIndex(ctx context.Context, index string) *StatusCmd
	FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd
	FTExplain(ctx context.Context, index string, query string) *StringCmd
	FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd
	FTHybrid(ctx context.Context, index string, searchExpr string, vectorField string, vectorData Vector) *FTHybridCmd
	FTHybridWithArgs(ctx context.Context, index string, options *FTHybridOptions) *FTHybridCmd
	FTInfo(ctx context.Context, index string) *FTInfoCmd
	FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd
	FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd
	FTSearch(ctx context.Context, index string, query string) *FTSearchCmd
	FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd
	FTSynDump(ctx context.Context, index string) *FTSynDumpCmd
	FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd
	FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd
	FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd
}

// FTCreateOptions holds the index-level options of FT.CREATE.
type FTCreateOptions struct {
	OnHash          bool
	OnJSON          bool
	Prefix          []interface{}
	Filter          string
	DefaultLanguage string
	LanguageField   string
	Score           float64
	ScoreField      string
	PayloadField    string
	MaxTextFields   int
	NoOffsets       bool
	Temporary       int
	NoHL            bool
	NoFields        bool
	NoFreqs         bool
	StopWords       []interface{}
	SkipInitialScan bool
}

// FieldSchema describes one SCHEMA attribute of FT.CREATE.
type FieldSchema struct {
	FieldName         string
	As                string
	FieldType         SearchFieldType
	Sortable          bool
	UNF               bool
	NoStem            bool
	NoIndex           bool
	PhoneticMatcher   string
	Weight            float64
	Separator         string
	CaseSensitive     bool
	WithSuffixtrie    bool
	VectorArgs        *FTVectorArgs
	GeoShapeFieldType string
	IndexEmpty        bool
	IndexMissing      bool
}

// FTVectorArgs selects the vector index algorithm; presumably only one of the
// option structs should be non-nil — confirm against FTCreate's arg builder.
type FTVectorArgs struct {
	FlatOptions   *FTFlatOptions
	HNSWOptions   *FTHNSWOptions
	VamanaOptions *FTVamanaOptions
}

// FTFlatOptions configures a FLAT vector index.
type FTFlatOptions struct {
	Type            string
	Dim             int
	DistanceMetric  string
	InitialCapacity int
	BlockSize       int
}

// FTHNSWOptions configures an HNSW vector index.
type FTHNSWOptions struct {
	Type                   string
	Dim                    int
	DistanceMetric         string
	InitialCapacity        int
	MaxEdgesPerNode        int
	MaxAllowedEdgesPerNode int
	EFRunTime              int
	Epsilon                float64
}

// FTVamanaOptions configures a VAMANA vector index.
type FTVamanaOptions struct {
	Type                   string
	Dim                    int
	DistanceMetric         string
	Compression            string
	ConstructionWindowSize int
	GraphMaxDegree         int
	SearchWindowSize       int
	Epsilon                float64
	TrainingThreshold      int
	ReduceDim              int
}

// FTDropIndexOptions holds options for FT.DROPINDEX.
type FTDropIndexOptions struct {
	DeleteDocs bool
}

// SpellCheckTerms selects dictionary terms to include or exclude.
type SpellCheckTerms struct {
	Include    bool
	Exclude    bool
	Dictionary string
}

// FTExplainOptions holds options for FT.EXPLAIN.
type FTExplainOptions struct {
	// Dialect 1,3 and 4 are deprecated since redis 8.0
	Dialect string
}

// FTSynUpdateOptions holds options for FT.SYNUPDATE.
type FTSynUpdateOptions struct {
	SkipInitialScan bool
}

// SearchAggregator enumerates the GROUPBY reducer functions of FT.AGGREGATE.
type SearchAggregator int

const (
	SearchInvalid = SearchAggregator(iota)
	SearchAvg
	SearchSum
	SearchMin
	SearchMax
	SearchCount
	SearchCountDistinct
	SearchCountDistinctish
	SearchStdDev
	SearchQuantile
	SearchToList
	SearchFirstValue
	SearchRandomSample
)

// String returns the Redis keyword for the reducer ("" for invalid/unknown).
func (a SearchAggregator) String() string {
	switch a {
	case SearchInvalid:
		return ""
	case SearchAvg:
		return "AVG"
	case SearchSum:
		return "SUM"
	case SearchMin:
		return "MIN"
	case SearchMax:
		return "MAX"
	case SearchCount:
		return "COUNT"
	case SearchCountDistinct:
		return "COUNT_DISTINCT"
	case SearchCountDistinctish:
		return "COUNT_DISTINCTISH"
	case SearchStdDev:
		return "STDDEV"
	case SearchQuantile:
		return "QUANTILE"
	case SearchToList:
		return "TOLIST"
	case SearchFirstValue:
		return "FIRST_VALUE"
	case SearchRandomSample:
		return "RANDOM_SAMPLE"
	default:
		return ""
	}
}

// SearchFieldType enumerates SCHEMA attribute types.
type SearchFieldType int

const (
	SearchFieldTypeInvalid = SearchFieldType(iota)
	SearchFieldTypeNumeric
	SearchFieldTypeTag
	SearchFieldTypeText
	SearchFieldTypeGeo
	SearchFieldTypeVector
	SearchFieldTypeGeoShape
)

// String returns the Redis keyword for the field type.
// NOTE(review): unknown values fall back to "TEXT" while Invalid returns "" —
// presumably deliberate, confirm before relying on it.
func (t SearchFieldType) String() string {
	switch t {
	case SearchFieldTypeInvalid:
		return ""
	case SearchFieldTypeNumeric:
		return "NUMERIC"
	case SearchFieldTypeTag:
		return "TAG"
	case SearchFieldTypeText:
		return "TEXT"
	case SearchFieldTypeGeo:
		return "GEO"
	case SearchFieldTypeVector:
		return "VECTOR"
	case SearchFieldTypeGeoShape:
		return "GEOSHAPE"
	default:
		return "TEXT"
	}
}

// FTAggregateReducer describes one REDUCE clause. Each reducer has different args.
// Please follow https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers for more information.
type FTAggregateReducer struct {
	Reducer SearchAggregator
	Args    []interface{}
	As      string
}

// FTAggregateGroupBy describes one GROUPBY clause with its reducers.
type FTAggregateGroupBy struct {
	Fields []interface{}
	Reduce []FTAggregateReducer
}

// FTAggregateSortBy describes one SORTBY property with its direction.
type FTAggregateSortBy struct {
	FieldName string
	Asc       bool
	Desc      bool
}

// FTAggregateApply describes one APPLY expression with an optional alias.
type FTAggregateApply struct {
	Field string
	As    string
}

// FTAggregateLoad describes one LOAD field with an optional alias.
type FTAggregateLoad struct {
	Field string
	As    string
}

// FTAggregateWithCursor holds WITHCURSOR sub-options.
type FTAggregateWithCursor struct {
	Count   int
	MaxIdle int
}

// FTAggregateOptions holds the optional arguments of FT.AGGREGATE.
type FTAggregateOptions struct {
	Verbatim  bool
	LoadAll   bool
	Load      []FTAggregateLoad
	Timeout   int
	GroupBy   []FTAggregateGroupBy
	SortBy    []FTAggregateSortBy
	SortByMax int
	// Scorer is used to set the scoring function; if not set, a default will be used.
	// The default scorer depends on the Redis version:
	// - `BM25` for Redis >= 8
	// - `TFIDF` for Redis < 8
	Scorer string
	// AddScores is available in Redis CE 8
	AddScores         bool
	Apply             []FTAggregateApply
	LimitOffset       int
	Limit             int
	Filter            string
	WithCursor        bool
	WithCursorOptions *FTAggregateWithCursor
	Params            map[string]interface{}
	// Dialect 1,3 and 4 are deprecated since redis 8.0
	DialectVersion int
}

// FTSearchFilter is a numeric FILTER clause.
type FTSearchFilter struct {
	FieldName interface{}
	Min       interface{}
	Max       interface{}
}

// FTSearchGeoFilter is a GEOFILTER clause.
type FTSearchGeoFilter struct {
	FieldName string
	Longitude float64
	Latitude  float64
	Radius    float64
	Unit      string
}

// FTSearchReturn is one RETURN field with an optional alias.
type FTSearchReturn struct {
	FieldName string
	As        string
}

// FTSearchSortBy is one SORTBY field with its direction.
type FTSearchSortBy struct {
	FieldName string
	Asc       bool
	Desc      bool
}

// FTSearchOptions hold options that can be passed to the FT.SEARCH command.
// More information about the options can be found
// in the documentation for FT.SEARCH https://redis.io/docs/latest/commands/ft.search/
type FTSearchOptions struct {
	NoContent    bool
	Verbatim     bool
	NoStopWords  bool
	WithScores   bool
	WithPayloads bool
	WithSortKeys bool
	Filters      []FTSearchFilter
	GeoFilter    []FTSearchGeoFilter
	InKeys       []interface{}
	InFields     []interface{}
	Return       []FTSearchReturn
	Slop         int
	Timeout      int
	InOrder      bool
	Language     string
	Expander     string
	// Scorer is used to set the scoring function; if not set, a default will be used.
	// The default scorer depends on the Redis version:
	// - `BM25` for Redis >= 8
	// - `TFIDF` for Redis < 8
	Scorer          string
	ExplainScore    bool
	Payload         string
	SortBy          []FTSearchSortBy
	SortByWithCount bool
	LimitOffset     int
	Limit           int
	// CountOnly sets LIMIT 0 0 to get the count - number of documents in the result set without actually returning the result set.
	// When using this option, the Limit and LimitOffset options are ignored.
+ CountOnly bool + Params map[string]interface{} + // Dialect 1,3 and 4 are deprecated since redis 8.0 + DialectVersion int +} + +// FTHybridCombineMethod represents the fusion method for combining search and vector results +type FTHybridCombineMethod string + +const ( + FTHybridCombineRRF FTHybridCombineMethod = "RRF" + FTHybridCombineLinear FTHybridCombineMethod = "LINEAR" + FTHybridCombineFunction FTHybridCombineMethod = "FUNCTION" +) + +// FTHybridSearchExpression represents a search expression in hybrid search +type FTHybridSearchExpression struct { + Query string + Scorer string + ScorerParams []interface{} + YieldScoreAs string +} + +type FTHybridVectorMethod = string + +const ( + KNN FTHybridCombineMethod = "KNN" + RANGE FTHybridCombineMethod = "RANGE" +) + +// FTHybridVectorExpression represents a vector expression in hybrid search +type FTHybridVectorExpression struct { + VectorField string + VectorData Vector + // VectorParamName specifies the parameter name for passing vector data via PARAMS mechanism. + // REQUIRED for Redis 8.6+ (inline vector blobs are not supported in 8.6+). + // Optional for Redis 8.4-8.5 (both inline and PARAMS are supported). + // When set, the vector blob will be passed as: VSIM @field $VectorParamName PARAMS ... 
	// When empty, the vector blob will be inlined: VSIM @field ... (fails on Redis 8.6+)
	VectorParamName string
	Method          FTHybridVectorMethod
	MethodParams    []interface{}
	Filter          string
	YieldScoreAs    string
}

// FTHybridCombineOptions represents options for result fusion
type FTHybridCombineOptions struct {
	Method       FTHybridCombineMethod
	Count        int
	Window       int     // For RRF
	Constant     float64 // For RRF
	Alpha        float64 // For LINEAR
	Beta         float64 // For LINEAR
	YieldScoreAs string
}

// FTHybridGroupBy represents GROUP BY functionality
type FTHybridGroupBy struct {
	Count        int
	Fields       []string
	ReduceFunc   string
	ReduceCount  int
	ReduceParams []interface{}
}

// FTHybridApply represents APPLY functionality
type FTHybridApply struct {
	Expression string
	AsField    string
}

// FTHybridWithCursor represents cursor configuration for hybrid search
type FTHybridWithCursor struct {
	Count   int // Number of results to return per cursor read
	MaxIdle int // Maximum idle time in milliseconds before cursor is automatically deleted
}

// FTHybridOptions hold options that can be passed to the FT.HYBRID command
type FTHybridOptions struct {
	CountExpressions  int                        // Number of search/vector expressions
	SearchExpressions []FTHybridSearchExpression // Multiple search expressions
	VectorExpressions []FTHybridVectorExpression // Multiple vector expressions
	Combine           *FTHybridCombineOptions    // Fusion step options
	Load              []string                   // Projected fields
	GroupBy           *FTHybridGroupBy           // Aggregation grouping
	Apply             []FTHybridApply            // Field transformations
	SortBy            []FTSearchSortBy           // Reuse from FTSearch
	Filter            string                     // Post-filter expression
	LimitOffset       int                        // Result limiting
	Limit             int
	Params            map[string]interface{} // Parameter substitution
	ExplainScore      bool                   // Include score explanations
	Timeout           int                    // Runtime timeout
	WithCursor        bool                   // Enable cursor support for large result sets
	WithCursorOptions *FTHybridWithCursor    // Cursor configuration options
}

// FTSynDumpResult pairs a term with its synonym-group members (FT.SYNDUMP).
type FTSynDumpResult struct {
	Term     string
	Synonyms []string
}

// FTSynDumpCmd carries the parsed reply of FT.SYNDUMP.
type FTSynDumpCmd struct {
	baseCmd
	val []FTSynDumpResult
}

// FTAggregateResult is the typed reply of FT.AGGREGATE.
type FTAggregateResult struct {
	Total int
	Rows  []AggregateRow
}

// AggregateRow is a single aggregation result row (field name -> value).
type AggregateRow struct {
	Fields map[string]interface{}
}

// AggregateCmd carries the parsed reply of FT.AGGREGATE.
type AggregateCmd struct {
	baseCmd
	val *FTAggregateResult
}

// FTInfoResult mirrors the fields returned by FT.INFO.
type FTInfoResult struct {
	IndexErrors              IndexErrors
	Attributes               []FTAttribute
	BytesPerRecordAvg        string
	Cleaning                 int
	CursorStats              CursorStats
	DialectStats             map[string]int
	DocTableSizeMB           float64
	FieldStatistics          []FieldStatistic
	GCStats                  GCStats
	GeoshapesSzMB            float64
	HashIndexingFailures     int
	IndexDefinition          IndexDefinition
	IndexName                string
	IndexOptions             []string
	Indexing                 int
	InvertedSzMB             float64
	KeyTableSizeMB           float64
	MaxDocID                 int
	NumDocs                  int
	NumRecords               int
	NumTerms                 int
	NumberOfUses             int
	OffsetBitsPerRecordAvg   string
	OffsetVectorsSzMB        float64
	OffsetsPerTermAvg        string
	PercentIndexed           float64
	RecordsPerDocAvg         string
	SortableValuesSizeMB     float64
	TagOverheadSzMB          float64
	TextOverheadSzMB         float64
	TotalIndexMemorySzMB     float64
	TotalIndexingTime        int
	TotalInvertedIndexBlocks int
	VectorIndexSzMB          float64
}

// IndexErrors summarizes indexing failures reported by FT.INFO.
type IndexErrors struct {
	IndexingFailures     int
	LastIndexingError    string
	LastIndexingErrorKey string
}

// FTAttribute describes one indexed attribute as reported by FT.INFO.
type FTAttribute struct {
	Identifier      string
	Attribute       string
	Type            string
	Weight          float64
	Sortable        bool
	NoStem          bool
	NoIndex         bool
	UNF             bool
	PhoneticMatcher string
	CaseSensitive   bool
	WithSuffixtrie  bool

	// Vector specific attributes
	Algorithm      string
	DataType       string
	Dim            int
	DistanceMetric string
	M              int
	EFConstruction int
}

// CursorStats holds cursor counters from FT.INFO.
type CursorStats struct {
	GlobalIdle    int
	GlobalTotal   int
	IndexCapacity int
	IndexTotal    int
}

// FieldStatistic pairs an attribute with its indexing error counters.
type FieldStatistic struct {
	Identifier  string
	Attribute   string
	IndexErrors IndexErrors
}

// GCStats holds garbage-collection counters from FT.INFO.
type GCStats struct {
	BytesCollected int
	TotalMsRun     int
	TotalCycles    int
AverageCycleTimeMs string + LastRunTimeMs int + GCNumericTreesMissed int + GCBlocksDenied int +} + +type IndexDefinition struct { + KeyType string + Prefixes []string + DefaultScore float64 +} + +type FTSpellCheckOptions struct { + Distance int + Terms *FTSpellCheckTerms + // Dialect 1,3 and 4 are deprecated since redis 8.0 + Dialect int +} + +type FTSpellCheckTerms struct { + Inclusion string // Either "INCLUDE" or "EXCLUDE" + Dictionary string + Terms []interface{} +} + +type SpellCheckResult struct { + Term string + Suggestions []SpellCheckSuggestion +} + +type SpellCheckSuggestion struct { + Score float64 + Suggestion string +} + +type FTSearchResult struct { + Total int + Docs []Document +} + +type Document struct { + ID string + Score *float64 + Payload *string + SortKey *string + Fields map[string]string + Error error +} + +type AggregateQuery []interface{} + +// FT_List - Lists all the existing indexes in the database. +// For more information, please refer to the Redis documentation: +// [FT._LIST]: (https://redis.io/commands/ft._list/) +func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT._LIST") + _ = c(ctx, cmd) + return cmd +} + +// FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + cmd := NewMapStringInterfaceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func FTAggregateQuery(query string, options *FTAggregateOptions) (AggregateQuery, error) { + queryArgs := []interface{}{query} + if options != nil { + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + + if options.AddScores { + queryArgs = append(queryArgs, "ADDSCORES") + } + + if options.LoadAll && options.Load != nil { + return nil, fmt.Errorf("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") + } + if options.LoadAll { + queryArgs = append(queryArgs, "LOAD", "*") + } + if options.Load != nil { + queryArgs = append(queryArgs, "LOAD", len(options.Load)) + index, count := len(queryArgs)-1, 0 + for _, load := range options.Load { + queryArgs = append(queryArgs, load.Field) + count++ + if load.As != "" { + queryArgs = append(queryArgs, "AS", load.As) + count += 2 + } + } + queryArgs[index] = count + } + + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + + for _, apply := range options.Apply { + queryArgs = append(queryArgs, "APPLY", apply.Field) + if apply.As != "" { + queryArgs = append(queryArgs, "AS", apply.As) + } + } + + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields)) + queryArgs = append(queryArgs, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + queryArgs = append(queryArgs, "REDUCE") + queryArgs = append(queryArgs, reducer.Reducer.String()) + if reducer.Args != nil { + queryArgs = append(queryArgs, len(reducer.Args)) + queryArgs = append(queryArgs, reducer.Args...) 
+ } else { + queryArgs = append(queryArgs, 0) + } + if reducer.As != "" { + queryArgs = append(queryArgs, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + return nil, fmt.Errorf("FT.AGGREGATE: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + queryArgs = append(queryArgs, len(sortByOptions)) + queryArgs = append(queryArgs, sortByOptions...) + } + if options.SortByMax > 0 { + queryArgs = append(queryArgs, "MAX", options.SortByMax) + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Filter != "" { + queryArgs = append(queryArgs, "FILTER", options.Filter) + } + if options.WithCursor { + queryArgs = append(queryArgs, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } else { + queryArgs = append(queryArgs, "DIALECT", 2) + } + } + return queryArgs, nil +} + +func ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) { + if len(data) == 0 { + return nil, fmt.Errorf("no data returned") + } + + total, ok := data[0].(int64) + if !ok { + return nil, 
fmt.Errorf("invalid total format") + } + + rows := make([]AggregateRow, 0, len(data)-1) + for _, row := range data[1:] { + fields, ok := row.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid row format") + } + + rowMap := make(map[string]interface{}) + for i := 0; i < len(fields); i += 2 { + key, ok := fields[i].(string) + if !ok { + return nil, fmt.Errorf("invalid field key format") + } + value := fields[i+1] + rowMap[key] = value + } + rows = append(rows, AggregateRow{Fields: rowMap}) + } + + result := &FTAggregateResult{ + Total: int(total), + Rows: rows, + } + return result, nil +} + +func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd { + return &AggregateCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + cmdType: CmdTypeAggregate, + }, + } +} + +func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) { + cmd.val = val +} + +func (cmd *AggregateCmd) Val() *FTAggregateResult { + return cmd.val +} + +func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) { + return cmd.val, cmd.err +} + +func (cmd *AggregateCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *AggregateCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *AggregateCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = ProcessAggregateResult(data) + if err != nil { + return err + } + return nil +} + +func (cmd *AggregateCmd) Clone() Cmder { + var val *FTAggregateResult + if cmd.val != nil { + val = &FTAggregateResult{ + Total: cmd.val.Total, + } + if cmd.val.Rows != nil { + val.Rows = make([]AggregateRow, len(cmd.val.Rows)) + for i, row := range cmd.val.Rows { + val.Rows[i] = AggregateRow{} + if row.Fields != nil { + val.Rows[i].Fields = make(map[string]interface{}, len(row.Fields)) + for k, v := range row.Fields { + val.Rows[i].Fields[k] = v + } + } + } + 
} + } + return &AggregateCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +// FTAggregateWithArgs - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion. +// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + if options != nil { + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.AddScores { + args = append(args, "ADDSCORES") + } + if options.LoadAll && options.Load != nil { + cmd := NewAggregateCmd(ctx, args...) 
+ cmd.SetErr(fmt.Errorf("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive")) + return cmd + } + if options.LoadAll { + args = append(args, "LOAD", "*") + } + if options.Load != nil { + args = append(args, "LOAD", len(options.Load)) + index, count := len(args)-1, 0 + for _, load := range options.Load { + args = append(args, load.Field) + count++ + if load.As != "" { + args = append(args, "AS", load.As) + count += 2 + } + } + args[index] = count + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + for _, apply := range options.Apply { + args = append(args, "APPLY", apply.Field) + if apply.As != "" { + args = append(args, "AS", apply.As) + } + } + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + args = append(args, "GROUPBY", len(groupBy.Fields)) + args = append(args, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + args = append(args, "REDUCE") + args = append(args, reducer.Reducer.String()) + if reducer.Args != nil { + args = append(args, len(reducer.Args)) + args = append(args, reducer.Args...) + } else { + args = append(args, 0) + } + if reducer.As != "" { + args = append(args, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + args = append(args, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + cmd := NewAggregateCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.AGGREGATE: ASC and DESC are mutually exclusive")) + return cmd + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + args = append(args, len(sortByOptions)) + args = append(args, sortByOptions...) 
+ } + if options.SortByMax > 0 { + args = append(args, "MAX", options.SortByMax) + } + if options.LimitOffset >= 0 && options.Limit > 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.WithCursor { + args = append(args, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + args = append(args, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } else { + args = append(args, "DIALECT", 2) + } + } + + cmd := NewAggregateCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasAdd - Adds an alias to an index. +// The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias. +// For more information, please refer to the Redis documentation: +// [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/) +func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd { + args := []interface{}{"FT.ALIASADD", alias, index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasDel - Removes an alias from an index. +// The 'alias' parameter specifies the alias to be removed. +// For more information, please refer to the Redis documentation: +// [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/) +func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasUpdate - Updates an alias to an index. 
+// The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias. +// If the alias already exists for a different index, it updates the alias to point to the specified index instead. +// For more information, please refer to the Redis documentation: +// [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/) +func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index) + _ = c(ctx, cmd) + return cmd +} + +// FTAlter - Alters the definition of an existing index. +// The 'index' parameter specifies the index to alter, and the 'skipInitialScan' parameter specifies whether to skip the initial scan. +// The 'definition' parameter specifies the new definition for the index. +// For more information, please refer to the Redis documentation: +// [FT.ALTER]: (https://redis.io/commands/ft.alter/) +func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd { + args := []interface{}{"FT.ALTER", index} + if skipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, "SCHEMA", "ADD") + args = append(args, definition...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Retrieves the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to retrieve. +// For more information, please refer to the Redis [FT.CONFIG GET] documentation. +// +// Deprecated: FTConfigGet is deprecated in Redis 8. +// All configuration will be done with the CONFIG GET command. 
+// For more information check [Client.ConfigGet] and [CONFIG GET Documentation] +// +// [CONFIG GET Documentation]: https://redis.io/commands/config-get/ +// [FT.CONFIG GET]: https://redis.io/commands/ft.config-get/ +func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd { + cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option) + _ = c(ctx, cmd) + return cmd +} + +// Sets the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value. +// For more information, please refer to the Redis [FT.CONFIG SET] documentation. +// +// Deprecated: FTConfigSet is deprecated in Redis 8. +// All configuration will be done with the CONFIG SET command. +// For more information check [Client.ConfigSet] and [CONFIG SET Documentation] +// +// [CONFIG SET Documentation]: https://redis.io/commands/config-set/ +// [FT.CONFIG SET]: https://redis.io/commands/ft.config-set/ +func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value) + _ = c(ctx, cmd) + return cmd +} + +// FTCreate - Creates a new index with the given options and schema. +// The 'index' parameter specifies the name of the index to create. +// The 'options' parameter specifies various options for the index, such as: +// whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc. +// The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc. 
+// For more information, please refer to the Redis documentation: +// [FT.CREATE]: (https://redis.io/commands/ft.create/) +func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd { + args := []interface{}{"FT.CREATE", index} + if options != nil { + if options.OnHash && !options.OnJSON { + args = append(args, "ON", "HASH") + } + if options.OnJSON && !options.OnHash { + args = append(args, "ON", "JSON") + } + if options.OnHash && options.OnJSON { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: ON HASH and ON JSON are mutually exclusive")) + return cmd + } + if options.Prefix != nil { + args = append(args, "PREFIX", len(options.Prefix)) + args = append(args, options.Prefix...) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.DefaultLanguage != "" { + args = append(args, "LANGUAGE", options.DefaultLanguage) + } + if options.LanguageField != "" { + args = append(args, "LANGUAGE_FIELD", options.LanguageField) + } + if options.Score > 0 { + args = append(args, "SCORE", options.Score) + } + if options.ScoreField != "" { + args = append(args, "SCORE_FIELD", options.ScoreField) + } + if options.PayloadField != "" { + args = append(args, "PAYLOAD_FIELD", options.PayloadField) + } + if options.MaxTextFields > 0 { + args = append(args, "MAXTEXTFIELDS", options.MaxTextFields) + } + if options.NoOffsets { + args = append(args, "NOOFFSETS") + } + if options.Temporary > 0 { + args = append(args, "TEMPORARY", options.Temporary) + } + if options.NoHL { + args = append(args, "NOHL") + } + if options.NoFields { + args = append(args, "NOFIELDS") + } + if options.NoFreqs { + args = append(args, "NOFREQS") + } + if options.StopWords != nil { + args = append(args, "STOPWORDS", len(options.StopWords)) + args = append(args, options.StopWords...) 
+ } + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + } + if schema == nil { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA is required")) + return cmd + } + args = append(args, "SCHEMA") + for _, schema := range schema { + if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA FieldName and FieldType are required")) + return cmd + } + args = append(args, schema.FieldName) + if schema.As != "" { + args = append(args, "AS", schema.As) + } + args = append(args, schema.FieldType.String()) + if schema.VectorArgs != nil { + if schema.FieldType != SearchFieldTypeVector { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs")) + return cmd + } + // Check mutual exclusivity of vector options + optionCount := 0 + if schema.VectorArgs.FlatOptions != nil { + optionCount++ + } + if schema.VectorArgs.HNSWOptions != nil { + optionCount++ + } + if schema.VectorArgs.VamanaOptions != nil { + optionCount++ + } + if optionCount != 1 { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA VectorArgs must have exactly one of FlatOptions, HNSWOptions, or VamanaOptions")) + return cmd + } + if schema.VectorArgs.FlatOptions != nil { + args = append(args, "FLAT") + if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" { + cmd := NewStatusCmd(ctx, args...) 
+ cmd.SetErr(fmt.Errorf("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT")) + return cmd + } + flatArgs := []interface{}{ + "TYPE", schema.VectorArgs.FlatOptions.Type, + "DIM", schema.VectorArgs.FlatOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric, + } + if schema.VectorArgs.FlatOptions.InitialCapacity > 0 { + flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity) + } + if schema.VectorArgs.FlatOptions.BlockSize > 0 { + flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize) + } + args = append(args, len(flatArgs)) + args = append(args, flatArgs...) + } + if schema.VectorArgs.HNSWOptions != nil { + args = append(args, "HNSW") + if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW")) + return cmd + } + hnswArgs := []interface{}{ + "TYPE", schema.VectorArgs.HNSWOptions.Type, + "DIM", schema.VectorArgs.HNSWOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric, + } + if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 { + hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity) + } + if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.EFRunTime > 0 { + hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime) + } + if schema.VectorArgs.HNSWOptions.Epsilon > 0 { + hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon) + } + args = append(args, 
len(hnswArgs)) + args = append(args, hnswArgs...) + } + if schema.VectorArgs.VamanaOptions != nil { + args = append(args, "SVS-VAMANA") + if schema.VectorArgs.VamanaOptions.Type == "" || schema.VectorArgs.VamanaOptions.Dim == 0 || schema.VectorArgs.VamanaOptions.DistanceMetric == "" { + cmd := NewStatusCmd(ctx, args...) + cmd.SetErr(fmt.Errorf("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR VAMANA")) + return cmd + } + vamanaArgs := []interface{}{ + "TYPE", schema.VectorArgs.VamanaOptions.Type, + "DIM", schema.VectorArgs.VamanaOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.VamanaOptions.DistanceMetric, + } + if schema.VectorArgs.VamanaOptions.Compression != "" { + vamanaArgs = append(vamanaArgs, "COMPRESSION", schema.VectorArgs.VamanaOptions.Compression) + } + if schema.VectorArgs.VamanaOptions.ConstructionWindowSize > 0 { + vamanaArgs = append(vamanaArgs, "CONSTRUCTION_WINDOW_SIZE", schema.VectorArgs.VamanaOptions.ConstructionWindowSize) + } + if schema.VectorArgs.VamanaOptions.GraphMaxDegree > 0 { + vamanaArgs = append(vamanaArgs, "GRAPH_MAX_DEGREE", schema.VectorArgs.VamanaOptions.GraphMaxDegree) + } + if schema.VectorArgs.VamanaOptions.SearchWindowSize > 0 { + vamanaArgs = append(vamanaArgs, "SEARCH_WINDOW_SIZE", schema.VectorArgs.VamanaOptions.SearchWindowSize) + } + if schema.VectorArgs.VamanaOptions.Epsilon > 0 { + vamanaArgs = append(vamanaArgs, "EPSILON", schema.VectorArgs.VamanaOptions.Epsilon) + } + if schema.VectorArgs.VamanaOptions.TrainingThreshold > 0 { + vamanaArgs = append(vamanaArgs, "TRAINING_THRESHOLD", schema.VectorArgs.VamanaOptions.TrainingThreshold) + } + if schema.VectorArgs.VamanaOptions.ReduceDim > 0 { + vamanaArgs = append(vamanaArgs, "REDUCE", schema.VectorArgs.VamanaOptions.ReduceDim) + } + args = append(args, len(vamanaArgs)) + args = append(args, vamanaArgs...) + } + } + if schema.GeoShapeFieldType != "" { + if schema.FieldType != SearchFieldTypeGeoShape { + cmd := NewStatusCmd(ctx, args...) 
+ cmd.SetErr(fmt.Errorf("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType")) + return cmd + } + args = append(args, schema.GeoShapeFieldType) + } + if schema.NoStem { + args = append(args, "NOSTEM") + } + if schema.Sortable { + args = append(args, "SORTABLE") + } + if schema.UNF { + args = append(args, "UNF") + } + if schema.NoIndex { + args = append(args, "NOINDEX") + } + if schema.PhoneticMatcher != "" { + args = append(args, "PHONETIC", schema.PhoneticMatcher) + } + if schema.Weight > 0 { + args = append(args, "WEIGHT", schema.Weight) + } + if schema.Separator != "" { + args = append(args, "SEPARATOR", schema.Separator) + } + if schema.CaseSensitive { + args = append(args, "CASESENSITIVE") + } + if schema.WithSuffixtrie { + args = append(args, "WITHSUFFIXTRIE") + } + if schema.IndexEmpty { + args = append(args, "INDEXEMPTY") + } + if schema.IndexMissing { + args = append(args, "INDEXMISSING") + + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorDel - Deletes a cursor from an existing index. +// The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete. +// For more information, please refer to the Redis documentation: +// [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/) +func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorRead - Reads the next results from an existing cursor. +// The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read. 
+// For more information, please refer to the Redis documentation: +// [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/) +func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd { + args := []interface{}{"FT.CURSOR", "READ", index, cursorId} + if count > 0 { + args = append(args, "COUNT", count) + } + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictAdd - Adds terms to a dictionary. +// The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add. +// For more information, please refer to the Redis documentation: +// [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/) +func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTADD", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDel - Deletes terms from a dictionary. +// The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete. +// For more information, please refer to the Redis documentation: +// [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/) +func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTDEL", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDump - Returns all terms in the specified dictionary. +// The 'dict' parameter specifies the dictionary from which to return the terms. 
+// For more information, please refer to the Redis documentation: +// [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/) +func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndex - Deletes an index. +// The 'index' parameter specifies the index to delete. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndexWithArgs - Deletes an index with options. +// The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + if options != nil { + if options.DeleteDocs { + args = append(args, "DD") + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplain - Returns the execution plan for a complex query. +// The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd { + cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainWithArgs - Returns the execution plan for a complex query with options. 
+// The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd { + args := []interface{}{"FT.EXPLAIN", index, query} + if options.Dialect != "" { + args = append(args, "DIALECT", options.Dialect) + } else { + args = append(args, "DIALECT", 2) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainCli - Returns the execution plan for a complex query. [Not Implemented] +// For more information, see https://redis.io/commands/ft.explaincli/ +func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error { + return fmt.Errorf("FTExplainCli is not implemented") +} + +func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) { + var ftInfo FTInfoResult + // Manually parse each field from the map + if indexErrors, ok := data["Index Errors"].([]interface{}); ok { + ftInfo.IndexErrors = IndexErrors{ + IndexingFailures: internal.ToInteger(indexErrors[1]), + LastIndexingError: internal.ToString(indexErrors[3]), + LastIndexingErrorKey: internal.ToString(indexErrors[5]), + } + } + + if attributes, ok := data["attributes"].([]interface{}); ok { + for _, attr := range attributes { + if attrMap, ok := attr.([]interface{}); ok { + att := FTAttribute{} + attrLen := len(attrMap) + for i := 0; i < attrLen; i++ { + if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" && i+1 < attrLen { + att.Attribute = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" && i+1 < attrLen { + att.Identifier = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == 
"type" && i+1 < attrLen { + att.Type = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "weight" && i+1 < attrLen { + att.Weight = internal.ToFloat(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" { + att.NoStem = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" { + att.Sortable = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" { + att.NoIndex = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "unf" { + att.UNF = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" && i+1 < attrLen { + att.PhoneticMatcher = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" { + att.CaseSensitive = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" { + att.WithSuffixtrie = true + continue + } + + // vector specific attributes + if internal.ToLower(internal.ToString(attrMap[i])) == "algorithm" && i+1 < attrLen { + att.Algorithm = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "data_type" && i+1 < attrLen { + att.DataType = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "dim" && i+1 < attrLen { + att.Dim = internal.ToInteger(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "distance_metric" && i+1 < attrLen { + att.DistanceMetric = internal.ToString(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "m" && i+1 < attrLen { + att.M = internal.ToInteger(attrMap[i+1]) + i++ + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "ef_construction" && i+1 < attrLen { + att.EFConstruction = internal.ToInteger(attrMap[i+1]) 
+ i++ + continue + } + + } + ftInfo.Attributes = append(ftInfo.Attributes, att) + } + } + } + + ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"]) + ftInfo.Cleaning = internal.ToInteger(data["cleaning"]) + + if cursorStats, ok := data["cursor_stats"].([]interface{}); ok { + ftInfo.CursorStats = CursorStats{ + GlobalIdle: internal.ToInteger(cursorStats[1]), + GlobalTotal: internal.ToInteger(cursorStats[3]), + IndexCapacity: internal.ToInteger(cursorStats[5]), + IndexTotal: internal.ToInteger(cursorStats[7]), + } + } + + if dialectStats, ok := data["dialect_stats"].([]interface{}); ok { + ftInfo.DialectStats = make(map[string]int) + for i := 0; i < len(dialectStats); i += 2 { + ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1]) + } + } + + ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"]) + + if fieldStats, ok := data["field statistics"].([]interface{}); ok { + for _, stat := range fieldStats { + if statMap, ok := stat.([]interface{}); ok { + ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{ + Identifier: internal.ToString(statMap[1]), + Attribute: internal.ToString(statMap[3]), + IndexErrors: IndexErrors{ + IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]), + LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]), + LastIndexingErrorKey: internal.ToString(statMap[5].([]interface{})[5]), + }, + }) + } + } + } + + if gcStats, ok := data["gc_stats"].([]interface{}); ok { + ftInfo.GCStats = GCStats{} + for i := 0; i < len(gcStats); i += 2 { + if internal.ToLower(internal.ToString(gcStats[i])) == "bytes_collected" { + ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" { + ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" { + 
ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" { + ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" { + ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" { + ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" { + ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1]) + continue + } + } + } + + ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"]) + ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"]) + + if indexDef, ok := data["index_definition"].([]interface{}); ok { + ftInfo.IndexDefinition = IndexDefinition{ + KeyType: internal.ToString(indexDef[1]), + Prefixes: internal.ToStringSlice(indexDef[3]), + DefaultScore: internal.ToFloat(indexDef[5]), + } + } + + ftInfo.IndexName = internal.ToString(data["index_name"]) + ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{})) + ftInfo.Indexing = internal.ToInteger(data["indexing"]) + ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"]) + ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"]) + ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"]) + ftInfo.NumDocs = internal.ToInteger(data["num_docs"]) + ftInfo.NumRecords = internal.ToInteger(data["num_records"]) + ftInfo.NumTerms = internal.ToInteger(data["num_terms"]) + ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"]) + ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"]) + ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"]) + ftInfo.OffsetsPerTermAvg = 
internal.ToString(data["offsets_per_term_avg"]) + ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"]) + ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"]) + ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"]) + ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"]) + ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"]) + ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"]) + ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"]) + ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"]) + ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"]) + + return ftInfo, nil +} + +type FTInfoCmd struct { + baseCmd + val FTInfoResult +} + +func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd { + return &FTInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + cmdType: CmdTypeFTInfo, + }, + } +} + +func (cmd *FTInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTInfoCmd) SetVal(val FTInfoResult) { + cmd.val = val +} + +func (cmd *FTInfoCmd) Result() (FTInfoResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTInfoCmd) Val() FTInfoResult { + return cmd.val +} + +func (cmd *FTInfoCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTInfoCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} +func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + data := make(map[string]interface{}, n) + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + data[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + data[k] = err + continue + } + return err + } + data[k] = v + } + cmd.val, err = parseFTInfo(data) + if err != nil { + 
return err + } + + return nil +} + +func (cmd *FTInfoCmd) Clone() Cmder { + val := FTInfoResult{ + IndexErrors: cmd.val.IndexErrors, + BytesPerRecordAvg: cmd.val.BytesPerRecordAvg, + Cleaning: cmd.val.Cleaning, + CursorStats: cmd.val.CursorStats, + DocTableSizeMB: cmd.val.DocTableSizeMB, + GCStats: cmd.val.GCStats, + GeoshapesSzMB: cmd.val.GeoshapesSzMB, + HashIndexingFailures: cmd.val.HashIndexingFailures, + IndexDefinition: cmd.val.IndexDefinition, + IndexName: cmd.val.IndexName, + Indexing: cmd.val.Indexing, + InvertedSzMB: cmd.val.InvertedSzMB, + KeyTableSizeMB: cmd.val.KeyTableSizeMB, + MaxDocID: cmd.val.MaxDocID, + NumDocs: cmd.val.NumDocs, + NumRecords: cmd.val.NumRecords, + NumTerms: cmd.val.NumTerms, + NumberOfUses: cmd.val.NumberOfUses, + OffsetBitsPerRecordAvg: cmd.val.OffsetBitsPerRecordAvg, + OffsetVectorsSzMB: cmd.val.OffsetVectorsSzMB, + OffsetsPerTermAvg: cmd.val.OffsetsPerTermAvg, + PercentIndexed: cmd.val.PercentIndexed, + RecordsPerDocAvg: cmd.val.RecordsPerDocAvg, + SortableValuesSizeMB: cmd.val.SortableValuesSizeMB, + TagOverheadSzMB: cmd.val.TagOverheadSzMB, + TextOverheadSzMB: cmd.val.TextOverheadSzMB, + TotalIndexMemorySzMB: cmd.val.TotalIndexMemorySzMB, + TotalIndexingTime: cmd.val.TotalIndexingTime, + TotalInvertedIndexBlocks: cmd.val.TotalInvertedIndexBlocks, + VectorIndexSzMB: cmd.val.VectorIndexSzMB, + } + // Clone slices and maps + if cmd.val.Attributes != nil { + val.Attributes = make([]FTAttribute, len(cmd.val.Attributes)) + copy(val.Attributes, cmd.val.Attributes) + } + if cmd.val.DialectStats != nil { + val.DialectStats = make(map[string]int, len(cmd.val.DialectStats)) + for k, v := range cmd.val.DialectStats { + val.DialectStats[k] = v + } + } + if cmd.val.FieldStatistics != nil { + val.FieldStatistics = make([]FieldStatistic, len(cmd.val.FieldStatistics)) + copy(val.FieldStatistics, cmd.val.FieldStatistics) + } + if cmd.val.IndexOptions != nil { + val.IndexOptions = make([]string, len(cmd.val.IndexOptions)) + 
copy(val.IndexOptions, cmd.val.IndexOptions) + } + if cmd.val.IndexDefinition.Prefixes != nil { + val.IndexDefinition.Prefixes = make([]string, len(cmd.val.IndexDefinition.Prefixes)) + copy(val.IndexDefinition.Prefixes, cmd.val.IndexDefinition.Prefixes) + } + return &FTInfoCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +// FTInfo - Retrieves information about an index. +// The 'index' parameter specifies the index to retrieve information about. +// For more information, please refer to the Redis documentation: +// [FT.INFO]: (https://redis.io/commands/ft.info/) +func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd { + cmd := newFTInfoCmd(ctx, "FT.INFO", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheck - Checks a query string for spelling errors. +// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + cmd := newFTSpellCheckCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options. 
+// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + if options != nil { + if options.Distance > 0 { + args = append(args, "DISTANCE", options.Distance) + } + if options.Terms != nil { + args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary) + args = append(args, options.Terms.Terms...) + } + if options.Dialect > 0 { + args = append(args, "DIALECT", options.Dialect) + } else { + args = append(args, "DIALECT", 2) + } + } + cmd := newFTSpellCheckCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type FTSpellCheckCmd struct { + baseCmd + val []SpellCheckResult +} + +func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd { + return &FTSpellCheckCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + cmdType: CmdTypeFTSpellCheck, + }, + } +} + +func (cmd *FTSpellCheckCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) { + cmd.val = val +} + +func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult { + return cmd.val +} + +func (cmd *FTSpellCheckCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSpellCheckCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = parseFTSpellCheck(data) + if err != nil { + return err + } + return nil +} + +func parseFTSpellCheck(data 
[]interface{}) ([]SpellCheckResult, error) { + results := make([]SpellCheckResult, 0, len(data)) + + for _, termData := range data { + termInfo, ok := termData.([]interface{}) + if !ok || len(termInfo) != 3 { + return nil, fmt.Errorf("invalid term format") + } + + term, ok := termInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid term format") + } + + suggestionsData, ok := termInfo[2].([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid suggestions format") + } + + suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData)) + for _, suggestionData := range suggestionsData { + suggestionInfo, ok := suggestionData.([]interface{}) + if !ok || len(suggestionInfo) != 2 { + return nil, fmt.Errorf("invalid suggestion format") + } + + scoreStr, ok := suggestionInfo[0].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion score format") + } + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return nil, fmt.Errorf("invalid suggestion score value") + } + + suggestion, ok := suggestionInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion format") + } + + suggestions = append(suggestions, SpellCheckSuggestion{ + Score: score, + Suggestion: suggestion, + }) + } + + results = append(results, SpellCheckResult{ + Term: term, + Suggestions: suggestions, + }) + } + + return results, nil +} + +func (cmd *FTSpellCheckCmd) Clone() Cmder { + var val []SpellCheckResult + if cmd.val != nil { + val = make([]SpellCheckResult, len(cmd.val)) + for i, result := range cmd.val { + val[i] = SpellCheckResult{ + Term: result.Term, + } + if result.Suggestions != nil { + val[i].Suggestions = make([]SpellCheckSuggestion, len(result.Suggestions)) + copy(val[i].Suggestions, result.Suggestions) + } + } + } + return &FTSpellCheckCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) { + if len(data) < 1 { + 
return FTSearchResult{}, fmt.Errorf("unexpected search result format") + } + + total, ok := data[0].(int64) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid total results format") + } + + var results []Document + for i := 1; i < len(data); { + docID, ok := data[i].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid document ID format") + } + + doc := Document{ + ID: docID, + Fields: make(map[string]string), + } + i++ + + if noContent { + results = append(results, doc) + continue + } + + if withScores && i < len(data) { + if scoreStr, ok := data[i].(string); ok { + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return FTSearchResult{}, fmt.Errorf("invalid score format") + } + doc.Score = &score + i++ + } + } + + if withPayloads && i < len(data) { + if payload, ok := data[i].(string); ok { + doc.Payload = &payload + i++ + } + } + + if withSortKeys && i < len(data) { + if sortKey, ok := data[i].(string); ok { + doc.SortKey = &sortKey + i++ + } + } + + if i < len(data) { + fields, ok := data[i].([]interface{}) + if !ok { + if data[i] == proto.Nil || data[i] == nil { + doc.Error = proto.Nil + doc.Fields = map[string]string{} + fields = []interface{}{} + } else { + return FTSearchResult{}, fmt.Errorf("invalid document fields format") + } + } + + for j := 0; j < len(fields); j += 2 { + key, ok := fields[j].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field key format") + } + value, ok := fields[j+1].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field value format") + } + doc.Fields[key] = value + } + i++ + } + + results = append(results, doc) + } + return FTSearchResult{ + Total: int(total), + Docs: results, + }, nil +} + +type FTSearchCmd struct { + baseCmd + val FTSearchResult + options *FTSearchOptions +} + +func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd { + return &FTSearchCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + 
cmdType: CmdTypeFTSearch, + }, + options: options, + } +} + +func (cmd *FTSearchCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSearchCmd) SetVal(val FTSearchResult) { + cmd.val = val +} + +func (cmd *FTSearchCmd) Result() (FTSearchResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSearchCmd) Val() FTSearchResult { + return cmd.val +} + +func (cmd *FTSearchCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSearchCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = parseFTSearch(data, cmd.options.NoContent, cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys) + if err != nil { + return err + } + return nil +} + +func (cmd *FTSearchCmd) Clone() Cmder { + val := FTSearchResult{ + Total: cmd.val.Total, + } + if cmd.val.Docs != nil { + val.Docs = make([]Document, len(cmd.val.Docs)) + for i, doc := range cmd.val.Docs { + val.Docs[i] = Document{ + ID: doc.ID, + Score: doc.Score, + Payload: doc.Payload, + SortKey: doc.SortKey, + } + if doc.Fields != nil { + val.Docs[i].Fields = make(map[string]string, len(doc.Fields)) + for k, v := range doc.Fields { + val.Docs[i].Fields[k] = v + } + } + } + } + var options *FTSearchOptions + if cmd.options != nil { + options = &FTSearchOptions{ + NoContent: cmd.options.NoContent, + Verbatim: cmd.options.Verbatim, + NoStopWords: cmd.options.NoStopWords, + WithScores: cmd.options.WithScores, + WithPayloads: cmd.options.WithPayloads, + WithSortKeys: cmd.options.WithSortKeys, + Slop: cmd.options.Slop, + Timeout: cmd.options.Timeout, + InOrder: cmd.options.InOrder, + Language: cmd.options.Language, + Expander: cmd.options.Expander, + Scorer: cmd.options.Scorer, + ExplainScore: cmd.options.ExplainScore, + Payload: cmd.options.Payload, + SortByWithCount: cmd.options.SortByWithCount, + LimitOffset: 
cmd.options.LimitOffset, + Limit: cmd.options.Limit, + CountOnly: cmd.options.CountOnly, + DialectVersion: cmd.options.DialectVersion, + } + // Clone slices and maps + if cmd.options.Filters != nil { + options.Filters = make([]FTSearchFilter, len(cmd.options.Filters)) + copy(options.Filters, cmd.options.Filters) + } + if cmd.options.GeoFilter != nil { + options.GeoFilter = make([]FTSearchGeoFilter, len(cmd.options.GeoFilter)) + copy(options.GeoFilter, cmd.options.GeoFilter) + } + if cmd.options.InKeys != nil { + options.InKeys = make([]interface{}, len(cmd.options.InKeys)) + copy(options.InKeys, cmd.options.InKeys) + } + if cmd.options.InFields != nil { + options.InFields = make([]interface{}, len(cmd.options.InFields)) + copy(options.InFields, cmd.options.InFields) + } + if cmd.options.Return != nil { + options.Return = make([]FTSearchReturn, len(cmd.options.Return)) + copy(options.Return, cmd.options.Return) + } + if cmd.options.SortBy != nil { + options.SortBy = make([]FTSearchSortBy, len(cmd.options.SortBy)) + copy(options.SortBy, cmd.options.SortBy) + } + if cmd.options.Params != nil { + options.Params = make(map[string]interface{}, len(cmd.options.Params)) + for k, v := range cmd.options.Params { + options.Params[k] = v + } + } + } + return &FTSearchCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + options: options, + } +} + +// FTHybridResult represents the result of a hybrid search operation +type FTHybridResult struct { + TotalResults int + Results []map[string]interface{} + Warnings []string + ExecutionTime float64 +} + +// FTHybridCursorResult represents cursor result for hybrid search +type FTHybridCursorResult struct { + SearchCursorID int + VsimCursorID int +} + +type FTHybridCmd struct { + baseCmd + val FTHybridResult + cursorVal *FTHybridCursorResult + options *FTHybridOptions + withCursor bool +} + +func newFTHybridCmd(ctx context.Context, options *FTHybridOptions, args ...interface{}) *FTHybridCmd { + var withCursor bool + if options != nil && 
options.WithCursor { + withCursor = true + } + return &FTHybridCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + options: options, + withCursor: withCursor, + } +} + +func (cmd *FTHybridCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTHybridCmd) SetVal(val FTHybridResult) { + cmd.val = val +} + +func (cmd *FTHybridCmd) Result() (FTHybridResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTHybridCmd) CursorResult() (*FTHybridCursorResult, error) { + return cmd.cursorVal, cmd.err +} + +func (cmd *FTHybridCmd) Val() FTHybridResult { + return cmd.val +} + +func (cmd *FTHybridCmd) CursorVal() *FTHybridCursorResult { + return cmd.cursorVal +} + +func (cmd *FTHybridCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTHybridCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func parseFTHybrid(data []interface{}, withCursor bool) (FTHybridResult, *FTHybridCursorResult, error) { + // Convert to map + resultMap := make(map[string]interface{}) + for i := 0; i < len(data); i += 2 { + if i+1 < len(data) { + key, ok := data[i].(string) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid key type at index %d", i) + } + resultMap[key] = data[i+1] + } + } + + // Handle cursor result + if withCursor { + searchCursorID, ok1 := resultMap["SEARCH"].(int64) + vsimCursorID, ok2 := resultMap["VSIM"].(int64) + if !ok1 || !ok2 { + return FTHybridResult{}, nil, fmt.Errorf("invalid cursor result format") + } + return FTHybridResult{}, &FTHybridCursorResult{ + SearchCursorID: int(searchCursorID), + VsimCursorID: int(vsimCursorID), + }, nil + } + + // Parse regular result + totalResults, ok := resultMap["total_results"].(int64) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid total_results format") + } + + resultsData, ok := resultMap["results"].([]interface{}) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid results format") + } + + // Parse each result item + results := 
make([]map[string]interface{}, 0, len(resultsData)) + for _, item := range resultsData { + // Try parsing as map[string]interface{} first (RESP3 format) + if itemMap, ok := item.(map[string]interface{}); ok { + results = append(results, itemMap) + continue + } + + // Try parsing as map[interface{}]interface{} (alternative RESP3 format) + if rawMap, ok := item.(map[interface{}]interface{}); ok { + itemMap := make(map[string]interface{}) + for k, v := range rawMap { + if keyStr, ok := k.(string); ok { + itemMap[keyStr] = v + } + } + results = append(results, itemMap) + continue + } + + // Fall back to array format (RESP2 format - key-value pairs) + itemData, ok := item.([]interface{}) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid result item format") + } + + itemMap := make(map[string]interface{}) + for i := 0; i < len(itemData); i += 2 { + if i+1 < len(itemData) { + key, ok := itemData[i].(string) + if !ok { + return FTHybridResult{}, nil, fmt.Errorf("invalid item key format") + } + itemMap[key] = itemData[i+1] + } + } + results = append(results, itemMap) + } + + // Parse warnings (optional field) + var warnings []string + if warningsData, ok := resultMap["warnings"].([]interface{}); ok { + warnings = make([]string, 0, len(warningsData)) + for _, w := range warningsData { + if ws, ok := w.(string); ok { + warnings = append(warnings, ws) + } + } + } + + // Parse execution time (optional field) + var executionTime float64 + if execTimeVal, exists := resultMap["execution_time"]; exists { + switch v := execTimeVal.(type) { + case string: + var err error + executionTime, err = strconv.ParseFloat(v, 64) + if err != nil { + return FTHybridResult{}, nil, fmt.Errorf("invalid execution_time format: %v", err) + } + case float64: + executionTime = v + case int64: + executionTime = float64(v) + } + } + + return FTHybridResult{ + TotalResults: int(totalResults), + Results: results, + Warnings: warnings, + ExecutionTime: executionTime, + }, nil, nil +} + +func 
(cmd *FTHybridCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + + result, cursorResult, err := parseFTHybrid(data, cmd.withCursor) + if err != nil { + return err + } + + if cmd.withCursor { + cmd.cursorVal = cursorResult + } else { + cmd.val = result + } + return nil +} + +func (cmd *FTHybridCmd) Clone() Cmder { + val := FTHybridResult{ + TotalResults: cmd.val.TotalResults, + ExecutionTime: cmd.val.ExecutionTime, + } + if cmd.val.Results != nil { + val.Results = make([]map[string]interface{}, len(cmd.val.Results)) + for i, result := range cmd.val.Results { + val.Results[i] = make(map[string]interface{}, len(result)) + for k, v := range result { + val.Results[i][k] = v + } + } + } + if cmd.val.Warnings != nil { + val.Warnings = make([]string, len(cmd.val.Warnings)) + copy(val.Warnings, cmd.val.Warnings) + } + + var cursorVal *FTHybridCursorResult + if cmd.cursorVal != nil { + cursorVal = &FTHybridCursorResult{ + SearchCursorID: cmd.cursorVal.SearchCursorID, + VsimCursorID: cmd.cursorVal.VsimCursorID, + } + } + + var options *FTHybridOptions + if cmd.options != nil { + options = &FTHybridOptions{ + CountExpressions: cmd.options.CountExpressions, + Load: cmd.options.Load, + Filter: cmd.options.Filter, + LimitOffset: cmd.options.LimitOffset, + Limit: cmd.options.Limit, + ExplainScore: cmd.options.ExplainScore, + Timeout: cmd.options.Timeout, + WithCursor: cmd.options.WithCursor, + } + // Clone slices and maps + if cmd.options.SearchExpressions != nil { + options.SearchExpressions = make([]FTHybridSearchExpression, len(cmd.options.SearchExpressions)) + copy(options.SearchExpressions, cmd.options.SearchExpressions) + } + if cmd.options.VectorExpressions != nil { + options.VectorExpressions = make([]FTHybridVectorExpression, len(cmd.options.VectorExpressions)) + copy(options.VectorExpressions, cmd.options.VectorExpressions) + } + if cmd.options.Combine != nil { + options.Combine = &FTHybridCombineOptions{ + 
Method: cmd.options.Combine.Method, + Count: cmd.options.Combine.Count, + Window: cmd.options.Combine.Window, + Constant: cmd.options.Combine.Constant, + Alpha: cmd.options.Combine.Alpha, + Beta: cmd.options.Combine.Beta, + YieldScoreAs: cmd.options.Combine.YieldScoreAs, + } + } + if cmd.options.GroupBy != nil { + options.GroupBy = &FTHybridGroupBy{ + Count: cmd.options.GroupBy.Count, + ReduceFunc: cmd.options.GroupBy.ReduceFunc, + ReduceCount: cmd.options.GroupBy.ReduceCount, + } + if cmd.options.GroupBy.Fields != nil { + options.GroupBy.Fields = make([]string, len(cmd.options.GroupBy.Fields)) + copy(options.GroupBy.Fields, cmd.options.GroupBy.Fields) + } + if cmd.options.GroupBy.ReduceParams != nil { + options.GroupBy.ReduceParams = make([]interface{}, len(cmd.options.GroupBy.ReduceParams)) + copy(options.GroupBy.ReduceParams, cmd.options.GroupBy.ReduceParams) + } + } + if cmd.options.Apply != nil { + options.Apply = make([]FTHybridApply, len(cmd.options.Apply)) + copy(options.Apply, cmd.options.Apply) + } + if cmd.options.SortBy != nil { + options.SortBy = make([]FTSearchSortBy, len(cmd.options.SortBy)) + copy(options.SortBy, cmd.options.SortBy) + } + if cmd.options.Params != nil { + options.Params = make(map[string]interface{}, len(cmd.options.Params)) + for k, v := range cmd.options.Params { + options.Params[k] = v + } + } + if cmd.options.WithCursorOptions != nil { + options.WithCursorOptions = &FTHybridWithCursor{ + MaxIdle: cmd.options.WithCursorOptions.MaxIdle, + Count: cmd.options.WithCursorOptions.Count, + } + } + } + + return &FTHybridCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + cursorVal: cursorVal, + options: options, + withCursor: cmd.withCursor, + } +} + +// FTSearch - Executes a search query on an index. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. 
+// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...) + _ = c(ctx, cmd) + return cmd +} + +type SearchQuery []interface{} + +// FTSearchQuery - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. +// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func FTSearchQuery(query string, options *FTSearchOptions) (SearchQuery, error) { + queryArgs := []interface{}{query} + if options != nil { + if options.NoContent { + queryArgs = append(queryArgs, "NOCONTENT") + } + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + if options.NoStopWords { + queryArgs = append(queryArgs, "NOSTOPWORDS") + } + if options.WithScores { + queryArgs = append(queryArgs, "WITHSCORES") + } + if options.WithPayloads { + queryArgs = append(queryArgs, "WITHPAYLOADS") + } + if options.WithSortKeys { + queryArgs = append(queryArgs, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + queryArgs = append(queryArgs, "INKEYS", len(options.InKeys)) + queryArgs = append(queryArgs, options.InKeys...) 
+ } + if options.InFields != nil { + queryArgs = append(queryArgs, "INFIELDS", len(options.InFields)) + queryArgs = append(queryArgs, options.InFields...) + } + if options.Return != nil { + queryArgs = append(queryArgs, "RETURN") + queryArgsReturn := []interface{}{} + for _, ret := range options.Return { + queryArgsReturn = append(queryArgsReturn, ret.FieldName) + if ret.As != "" { + queryArgsReturn = append(queryArgsReturn, "AS", ret.As) + } + } + queryArgs = append(queryArgs, len(queryArgsReturn)) + queryArgs = append(queryArgs, queryArgsReturn...) + } + if options.Slop > 0 { + queryArgs = append(queryArgs, "SLOP", options.Slop) + } + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + if options.InOrder { + queryArgs = append(queryArgs, "INORDER") + } + if options.Language != "" { + queryArgs = append(queryArgs, "LANGUAGE", options.Language) + } + if options.Expander != "" { + queryArgs = append(queryArgs, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + if options.ExplainScore { + queryArgs = append(queryArgs, "EXPLAINSCORE") + } + if options.Payload != "" { + queryArgs = append(queryArgs, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + for _, sortBy := range options.SortBy { + queryArgs = append(queryArgs, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + return nil, fmt.Errorf("FT.SEARCH: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + queryArgs = append(queryArgs, "ASC") + } + if sortBy.Desc { + queryArgs = append(queryArgs, "DESC") + } + } + if options.SortByWithCount { + queryArgs = append(queryArgs, "WITHCOUNT") + } + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range 
options.Params { + queryArgs = append(queryArgs, key, value) + } + } + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } else { + queryArgs = append(queryArgs, "DIALECT", 2) + } + } + return queryArgs, nil +} + +// FTSearchWithArgs - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. +// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + if options != nil { + if options.NoContent { + args = append(args, "NOCONTENT") + } + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.NoStopWords { + args = append(args, "NOSTOPWORDS") + } + if options.WithScores { + args = append(args, "WITHSCORES") + } + if options.WithPayloads { + args = append(args, "WITHPAYLOADS") + } + if options.WithSortKeys { + args = append(args, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + args = append(args, "INKEYS", len(options.InKeys)) + args = append(args, options.InKeys...) + } + if options.InFields != nil { + args = append(args, "INFIELDS", len(options.InFields)) + args = append(args, options.InFields...) 
+ } + if options.Return != nil { + args = append(args, "RETURN") + argsReturn := []interface{}{} + for _, ret := range options.Return { + argsReturn = append(argsReturn, ret.FieldName) + if ret.As != "" { + argsReturn = append(argsReturn, "AS", ret.As) + } + } + args = append(args, len(argsReturn)) + args = append(args, argsReturn...) + } + if options.Slop > 0 { + args = append(args, "SLOP", options.Slop) + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + if options.InOrder { + args = append(args, "INORDER") + } + if options.Language != "" { + args = append(args, "LANGUAGE", options.Language) + } + if options.Expander != "" { + args = append(args, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.ExplainScore { + args = append(args, "EXPLAINSCORE") + } + if options.Payload != "" { + args = append(args, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + args = append(args, "SORTBY") + for _, sortBy := range options.SortBy { + args = append(args, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + cmd := newFTSearchCmd(ctx, options, args...) 
+ cmd.SetErr(fmt.Errorf("FT.SEARCH: ASC and DESC are mutually exclusive")) + return cmd + } + if sortBy.Asc { + args = append(args, "ASC") + } + if sortBy.Desc { + args = append(args, "DESC") + } + } + if options.SortByWithCount { + args = append(args, "WITHCOUNT") + } + } + if options.CountOnly { + args = append(args, "LIMIT", 0, 0) + } else { + if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } else { + args = append(args, "DIALECT", 2) + } + } + cmd := newFTSearchCmd(ctx, options, args...) + _ = c(ctx, cmd) + return cmd +} + +func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd { + return &FTSynDumpCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + cmdType: CmdTypeFTSynDump, + }, + } +} + +func (cmd *FTSynDumpCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) { + cmd.val = val +} + +func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult { + return cmd.val +} + +func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSynDumpCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSynDumpCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error { + termSynonymPairs, err := rd.ReadSlice() + if err != nil { + return err + } + + var results []FTSynDumpResult + for i := 0; i < len(termSynonymPairs); i += 2 { + term, ok := termSynonymPairs[i].(string) + if !ok { + return fmt.Errorf("invalid term format") + } + + synonyms, ok := termSynonymPairs[i+1].([]interface{}) + 
if !ok { + return fmt.Errorf("invalid synonyms format") + } + + synonymList := make([]string, len(synonyms)) + for j, syn := range synonyms { + synonym, ok := syn.(string) + if !ok { + return fmt.Errorf("invalid synonym format") + } + synonymList[j] = synonym + } + + results = append(results, FTSynDumpResult{ + Term: term, + Synonyms: synonymList, + }) + } + + cmd.val = results + return nil +} + +func (cmd *FTSynDumpCmd) Clone() Cmder { + var val []FTSynDumpResult + if cmd.val != nil { + val = make([]FTSynDumpResult, len(cmd.val)) + for i, result := range cmd.val { + val[i] = FTSynDumpResult{ + Term: result.Term, + } + if result.Synonyms != nil { + val[i].Synonyms = make([]string, len(result.Synonyms)) + copy(val[i].Synonyms, result.Synonyms) + } + } + } + return &FTSynDumpCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + +// FTSynDump - Dumps the contents of a synonym group. +// The 'index' parameter specifies the index to dump. +// For more information, please refer to the Redis documentation: +// [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/) +func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd { + cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdate - Creates or updates a synonym group with additional terms. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTTagVals - Returns all distinct values indexed in a tag field. +// The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from. +// For more information, please refer to the Redis documentation: +// [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/) +func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field) + _ = c(ctx, cmd) + return cmd +} + +// FTHybrid - Executes a hybrid search combining full-text search and vector similarity +// The 'index' parameter specifies the index to search, 'searchExpr' is the search query, +// 'vectorField' is the name of the vector field, and 'vectorData' is the vector to search with. 
+// FTHybrid is still experimental, the command behaviour and signature may change +func (c cmdable) FTHybrid(ctx context.Context, index string, searchExpr string, vectorField string, vectorData Vector) *FTHybridCmd { + options := &FTHybridOptions{ + CountExpressions: 2, + SearchExpressions: []FTHybridSearchExpression{ + {Query: searchExpr}, + }, + VectorExpressions: []FTHybridVectorExpression{ + {VectorField: vectorField, VectorData: vectorData}, + }, + } + return c.FTHybridWithArgs(ctx, index, options) +} + +// FTHybridWithArgs - Executes a hybrid search with advanced options +// FTHybridWithArgs is still experimental, the command behaviour and signature may change +func (c cmdable) FTHybridWithArgs(ctx context.Context, index string, options *FTHybridOptions) *FTHybridCmd { + args := []interface{}{"FT.HYBRID", index} + + if options != nil { + // Add search expressions + for _, searchExpr := range options.SearchExpressions { + args = append(args, "SEARCH", searchExpr.Query) + + if searchExpr.Scorer != "" { + args = append(args, "SCORER", searchExpr.Scorer) + if len(searchExpr.ScorerParams) > 0 { + args = append(args, searchExpr.ScorerParams...) + } + } + + if searchExpr.YieldScoreAs != "" { + args = append(args, "YIELD_SCORE_AS", searchExpr.YieldScoreAs) + } + } + + // Add vector expressions + for _, vectorExpr := range options.VectorExpressions { + args = append(args, "VSIM", "@"+vectorExpr.VectorField) + + // For FT.HYBRID, we need to send just the raw vector bytes, not the Value() format + // Value() returns [format, data] but FT.HYBRID expects just the blob + vectorValue := vectorExpr.VectorData.Value() + var vectorBlob interface{} + if len(vectorValue) >= 2 { + // vectorValue is [format, data, ...] 
- we only want the data part + vectorBlob = vectorValue[1] + } else { + // Fallback for unexpected format + vectorBlob = vectorValue + } + + // If VectorParamName is provided, use PARAMS mechanism (required for Redis 8.6+) + // If not provided, inline the vector blob (works on Redis 8.4/8.5, fails on 8.6+) + if vectorExpr.VectorParamName != "" { + // Use PARAMS mechanism + args = append(args, "$"+vectorExpr.VectorParamName) + if options.Params == nil { + options.Params = make(map[string]interface{}) + } + options.Params[vectorExpr.VectorParamName] = vectorBlob + } else { + // Inline the vector blob (deprecated in Redis 8.6+) + args = append(args, vectorBlob) + } + + if vectorExpr.Method != "" { + args = append(args, vectorExpr.Method) + if len(vectorExpr.MethodParams) > 0 { + // MethodParams should be key-value pairs, count them + args = append(args, len(vectorExpr.MethodParams)) + args = append(args, vectorExpr.MethodParams...) + } + } + + if vectorExpr.Filter != "" { + args = append(args, "FILTER", vectorExpr.Filter) + } + + if vectorExpr.YieldScoreAs != "" { + args = append(args, "YIELD_SCORE_AS", vectorExpr.YieldScoreAs) + } + } + + // Add combine/fusion options + if options.Combine != nil { + // Build combine parameters + combineParams := []interface{}{} + + switch options.Combine.Method { + case FTHybridCombineRRF: + if options.Combine.Window > 0 { + combineParams = append(combineParams, "WINDOW", options.Combine.Window) + } + if options.Combine.Constant > 0 { + combineParams = append(combineParams, "CONSTANT", options.Combine.Constant) + } + case FTHybridCombineLinear: + if options.Combine.Alpha > 0 { + combineParams = append(combineParams, "ALPHA", options.Combine.Alpha) + } + if options.Combine.Beta > 0 { + combineParams = append(combineParams, "BETA", options.Combine.Beta) + } + } + + if options.Combine.YieldScoreAs != "" { + combineParams = append(combineParams, "YIELD_SCORE_AS", options.Combine.YieldScoreAs) + } + + // Add COMBINE with method and 
parameter count + args = append(args, "COMBINE", string(options.Combine.Method)) + if len(combineParams) > 0 { + args = append(args, len(combineParams)) + args = append(args, combineParams...) + } + } + + // Add LOAD (projected fields) + if len(options.Load) > 0 { + args = append(args, "LOAD", len(options.Load)) + for _, field := range options.Load { + args = append(args, field) + } + } + + // Add GROUPBY + if options.GroupBy != nil { + args = append(args, "GROUPBY", options.GroupBy.Count) + for _, field := range options.GroupBy.Fields { + args = append(args, field) + } + if options.GroupBy.ReduceFunc != "" { + args = append(args, "REDUCE", options.GroupBy.ReduceFunc, options.GroupBy.ReduceCount) + args = append(args, options.GroupBy.ReduceParams...) + } + } + + // Add APPLY transformations + for _, apply := range options.Apply { + args = append(args, "APPLY", apply.Expression, "AS", apply.AsField) + } + + // Add SORTBY + if len(options.SortBy) > 0 { + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + cmd := newFTHybridCmd(ctx, options, args...) + cmd.SetErr(fmt.Errorf("FT.HYBRID: ASC and DESC are mutually exclusive")) + return cmd + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + args = append(args, "SORTBY", len(sortByOptions)) + args = append(args, sortByOptions...) 
+ } + + // Add FILTER (post-filter) + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + + // Add LIMIT + if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + + // Add PARAMS + if len(options.Params) > 0 { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + // Parameter keys should already have '$' prefix from the user + // Don't add it again if it's already there + args = append(args, key, value) + } + } + + // Add EXPLAINSCORE + if options.ExplainScore { + args = append(args, "EXPLAINSCORE") + } + + // Add TIMEOUT + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + + // Add WITHCURSOR support + if options.WithCursor { + args = append(args, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + args = append(args, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + } + + cmd := newFTHybridCmd(ctx, options, args...) 
+ _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go index 9ace0886..24646c14 100644 --- a/vendor/github.com/redis/go-redis/v9/sentinel.go +++ b/vendor/github.com/redis/go-redis/v9/sentinel.go @@ -4,14 +4,20 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net" + "net/url" + "strconv" "strings" "sync" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/rand" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) //------------------------------------------------------------------------------ @@ -57,31 +63,112 @@ type FailoverOptions struct { Protocol int Username string Password string - DB int + + // Push notifications are always enabled for RESP3 connections + // CredentialsProvider allows the username and password to be updated + // before reconnecting. It should return the current username and password. + CredentialsProvider func() (username string, password string) + + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. 
+ StreamingCredentialsProvider auth.StreamingCredentialsProvider + DB int MaxRetries int MinRetryBackoff time.Duration MaxRetryBackoff time.Duration - DialTimeout time.Duration + DialTimeout time.Duration + + // DialerRetries is the maximum number of retry attempts when dialing fails. + // + // default: 5 + DialerRetries int + + // DialerRetryTimeout is the backoff duration between retry attempts. + // + // default: 100 milliseconds + DialerRetryTimeout time.Duration + ReadTimeout time.Duration WriteTimeout time.Duration ContextTimeoutEnabled bool + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + PoolFIFO bool - PoolSize int - PoolTimeout time.Duration - MinIdleConns int - MaxIdleConns int - MaxActiveConns int - ConnMaxIdleTime time.Duration - ConnMaxLifetime time.Duration + PoolSize int + + // MaxConcurrentDials is the maximum number of concurrent connection creation goroutines. + // If <= 0, defaults to PoolSize. If > PoolSize, it will be capped at PoolSize. + MaxConcurrentDials int + + PoolTimeout time.Duration + MinIdleConns int + MaxIdleConns int + MaxActiveConns int + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration + ConnMaxLifetimeJitter time.Duration TLSConfig *tls.Config + // DisableIndentity - Disable set-lib on connect. + // + // default: false + // + // Deprecated: Use DisableIdentity instead. 
DisableIndentity bool - IdentitySuffix string + + // DisableIdentity is used to disable CLIENT SETINFO command on connect. + // + // default: false + DisableIdentity bool + + IdentitySuffix string + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Only applies to failover cluster clients. Default is 15 seconds. + FailingTimeoutSeconds int + + UnstableResp3 bool + + // PushNotificationProcessor is the processor for handling push notifications. + // If nil, a default processor will be created for RESP3 connections. + PushNotificationProcessor push.NotificationProcessor + + // MaintNotificationsConfig is not supported for FailoverClients at the moment + // MaintNotificationsConfig provides custom configuration for maintnotifications upgrades. + // When MaintNotificationsConfig.Mode is not "disabled", the client will handle + // upgrade notifications gracefully and manage connection/pool state transitions + // seamlessly. Requires Protocol: 3 (RESP3) for push notifications. + // If nil, maintnotifications upgrades are disabled. 
+ // (however if Mode is nil, it defaults to "auto" - enable if server supports it) + //MaintNotificationsConfig *maintnotifications.Config } func (opt *FailoverOptions) clientOptions() *Options { @@ -92,33 +179,52 @@ func (opt *FailoverOptions) clientOptions() *Options { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - DB: opt.DB, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, + DB: opt.DB, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + ContextTimeoutEnabled: opt.ContextTimeoutEnabled, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + MaxActiveConns: opt.MaxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ConnMaxLifetimeJitter: opt.ConnMaxLifetimeJitter, TLSConfig: opt.TLSConfig, + DisableIdentity: opt.DisableIdentity, DisableIndentity: opt.DisableIndentity, - IdentitySuffix: opt.IdentitySuffix, + + IdentitySuffix: 
opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, + PushNotificationProcessor: opt.PushNotificationProcessor, + + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeDisabled, + }, } } @@ -138,21 +244,41 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options { MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, + // The sentinel client uses a 4KiB read/write buffer size. + ReadBufferSize: 4096, + WriteBufferSize: 4096, + + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + ContextTimeoutEnabled: opt.ContextTimeoutEnabled, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + MaxActiveConns: opt.MaxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + ConnMaxLifetimeJitter: opt.ConnMaxLifetimeJitter, TLSConfig: opt.TLSConfig, + + DisableIdentity: opt.DisableIdentity, + DisableIndentity: opt.DisableIndentity, + + IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, + PushNotificationProcessor: opt.PushNotificationProcessor, + + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeDisabled, + }, } } @@ -163,40 +289,216 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: 
opt.Username, - Password: opt.Password, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRedirects: opt.MaxRetries, + ReadOnly: opt.ReplicaOnly, RouteByLatency: opt.RouteByLatency, RouteRandomly: opt.RouteRandomly, MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, + ReadBufferSize: opt.ReadBufferSize, + WriteBufferSize: opt.WriteBufferSize, + + DialTimeout: opt.DialTimeout, + DialerRetries: opt.DialerRetries, + DialerRetryTimeout: opt.DialerRetryTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + ContextTimeoutEnabled: opt.ContextTimeoutEnabled, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - PoolTimeout: opt.PoolTimeout, - MinIdleConns: opt.MinIdleConns, - MaxIdleConns: opt.MaxIdleConns, - MaxActiveConns: opt.MaxActiveConns, - ConnMaxIdleTime: opt.ConnMaxIdleTime, - ConnMaxLifetime: opt.ConnMaxLifetime, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MaxConcurrentDials: opt.MaxConcurrentDials, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + MaxActiveConns: opt.MaxActiveConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, TLSConfig: opt.TLSConfig, + + DisableIdentity: opt.DisableIdentity, + DisableIndentity: opt.DisableIndentity, + IdentitySuffix: opt.IdentitySuffix, + FailingTimeoutSeconds: opt.FailingTimeoutSeconds, + PushNotificationProcessor: opt.PushNotificationProcessor, + + MaintNotificationsConfig: &maintnotifications.Config{ + Mode: maintnotifications.ModeDisabled, + }, } } +// ParseFailoverURL parses a URL into FailoverOptions that can be used to connect to Redis. 
+// The URL must be in the form: +// +// redis://:@:/ +// or +// rediss://:@:/ +// +// To add additional addresses, specify the query parameter, "addr" one or more times. e.g: +// +// redis://:@:/?addr=:&addr=: +// or +// rediss://:@:/?addr=:&addr=: +// +// Most Option fields can be set using query parameters, with the following restrictions: +// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries +// - only scalar type fields are supported (bool, int, time.Duration) +// - for time.Duration fields, values must be a valid input for time.ParseDuration(); +// additionally a plain integer as value (i.e. without unit) is interpreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "sentinel_username" and "sentinel_password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query parameters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// - use "skip_verify=true" to ignore TLS certificate validation +// +// Example: +// +// redis://user:password@localhost:6789?master_name=mymaster&dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791 +// is equivalent to: +// &FailoverOptions{ +// MasterName: "mymaster", +// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"] +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// } +func ParseFailoverURL(redisURL string) (*FailoverOptions, error) { + u, err := url.Parse(redisURL) + if err != nil { + return nil, err + } + return setupFailoverConn(u) +} + +func setupFailoverConn(u *url.URL) (*FailoverOptions, error) { + o := &FailoverOptions{} + + o.SentinelUsername, o.SentinelPassword = getUserPassword(u) + + h, p := 
getHostPortWithDefaults(u) + o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p)) + + switch u.Scheme { + case "rediss": + o.TLSConfig = &tls.Config{ServerName: h, MinVersion: tls.VersionTLS12} + case "redis": + o.TLSConfig = nil + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } + + f := strings.FieldsFunc(u.Path, func(r rune) bool { + return r == '/' + }) + switch len(f) { + case 0: + o.DB = 0 + case 1: + var err error + if o.DB, err = strconv.Atoi(f[0]); err != nil { + return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) + } + default: + return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) + } + + return setupFailoverConnParams(u, o) +} + +func setupFailoverConnParams(u *url.URL, o *FailoverOptions) (*FailoverOptions, error) { + q := queryOptions{q: u.Query()} + + o.MasterName = q.string("master_name") + o.ClientName = q.string("client_name") + o.RouteByLatency = q.bool("route_by_latency") + o.RouteRandomly = q.bool("route_randomly") + o.ReplicaOnly = q.bool("replica_only") + o.UseDisconnectedReplicas = q.bool("use_disconnected_replicas") + o.Protocol = q.int("protocol") + o.Username = q.string("username") + o.Password = q.string("password") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.DialerRetries = q.int("dialer_retries") + o.DialerRetryTimeout = q.duration("dialer_retry_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.ContextTimeoutEnabled = q.bool("context_timeout_enabled") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.MaxConcurrentDials = q.int("max_concurrent_dials") + o.MinIdleConns = q.int("min_idle_conns") + o.MaxIdleConns = q.int("max_idle_conns") + o.MaxActiveConns = q.int("max_active_conns") + o.ConnMaxLifetime = q.duration("conn_max_lifetime") + if 
q.has("conn_max_lifetime_jitter") { + o.ConnMaxLifetimeJitter = min(q.duration("conn_max_lifetime_jitter"), o.ConnMaxLifetime) + } + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + o.PoolTimeout = q.duration("pool_timeout") + o.DisableIdentity = q.bool("disableIdentity") + o.IdentitySuffix = q.string("identitySuffix") + o.UnstableResp3 = q.bool("unstable_resp3") + + if q.err != nil { + return nil, q.err + } + + if tmp := q.string("db"); tmp != "" { + db, err := strconv.Atoi(tmp) + if err != nil { + return nil, fmt.Errorf("redis: invalid database number: %w", err) + } + o.DB = db + } + + addrs := q.strings("addr") + for _, addr := range addrs { + h, p, err := net.SplitHostPort(addr) + if err != nil || h == "" || p == "" { + return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) + } + + o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p)) + } + + if o.TLSConfig != nil && q.has("skip_verify") { + o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify") + } + + // any parameters left? + if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil +} + // NewFailoverClient returns a Redis client that uses Redis Sentinel // for automatic failover. It's safe for concurrent use by multiple // goroutines. 
func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + if failoverOpt == nil { + panic("redis: NewFailoverClient nil options") + } + if failoverOpt.RouteByLatency { panic("to route commands by latency, use NewFailoverClusterClient") } @@ -220,8 +522,6 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { opt.Dialer = masterReplicaDialer(failover) opt.init() - var connPool *pool.ConnPool - rdb := &Client{ baseClient: &baseClient{ opt: opt, @@ -229,15 +529,34 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { } rdb.init() - connPool = newConnPool(opt, rdb.dialHook) - rdb.connPool = connPool - rdb.onClose = failover.Close + // Initialize push notification processor using shared helper + // Use void processor by default for RESP2 connections + rdb.pushProcessor = initializePushProcessor(opt) + + // Generate unique pool names for metrics + uniqueID := generateUniqueID() + mainPoolName := opt.Addr + "_" + uniqueID + pubsubPoolName := opt.Addr + "_" + uniqueID + "_pubsub" + + var err error + rdb.connPool, err = newConnPool(opt, rdb.dialHook, mainPoolName) + if err != nil { + panic(fmt.Errorf("redis: failed to create connection pool: %w", err)) + } + rdb.pubSubPool, err = newPubSubPool(opt, rdb.dialHook, pubsubPoolName) + if err != nil { + panic(fmt.Errorf("redis: failed to create pubsub pool: %w", err)) + } + + rdb.onClose = rdb.wrappedOnClose(failover.Close) failover.mu.Lock() failover.onFailover = func(ctx context.Context, addr string) { - _ = connPool.Filter(func(cn *pool.Conn) bool { - return cn.RemoteAddr().String() != addr - }) + if connPool, ok := rdb.connPool.(*pool.ConnPool); ok { + _ = connPool.Filter(func(cn *pool.Conn) bool { + return cn.RemoteAddr().String() != addr + }) + } } failover.mu.Unlock() @@ -282,10 +601,12 @@ func masterReplicaDialer( // SentinelClient is a client for a Redis Sentinel. 
type SentinelClient struct { *baseClient - hooksMixin } func NewSentinelClient(opt *Options) *SentinelClient { + if opt == nil { + panic("redis: NewSentinelClient nil options") + } opt.init() c := &SentinelClient{ baseClient: &baseClient{ @@ -293,15 +614,46 @@ func NewSentinelClient(opt *Options) *SentinelClient { }, } + // Initialize push notification processor using shared helper + // Use void processor for Sentinel clients + c.pushProcessor = NewVoidPushNotificationProcessor() + c.initHooks(hooks{ dial: c.baseClient.dial, process: c.baseClient.process, }) - c.connPool = newConnPool(opt, c.dialHook) + + // Generate unique pool names for metrics + uniqueID := generateUniqueID() + mainPoolName := opt.Addr + "_" + uniqueID + pubsubPoolName := opt.Addr + "_" + uniqueID + "_pubsub" + + var err error + c.connPool, err = newConnPool(opt, c.dialHook, mainPoolName) + if err != nil { + panic(fmt.Errorf("redis: failed to create connection pool: %w", err)) + } + c.pubSubPool, err = newPubSubPool(opt, c.dialHook, pubsubPoolName) + if err != nil { + panic(fmt.Errorf("redis: failed to create pubsub pool: %w", err)) + } return c } +// GetPushNotificationHandler returns the handler for a specific push notification name. +// Returns nil if no handler is registered for the given name. +func (c *SentinelClient) GetPushNotificationHandler(pushNotificationName string) push.NotificationHandler { + return c.pushProcessor.GetHandler(pushNotificationName) +} + +// RegisterPushNotificationHandler registers a handler for a specific push notification name. +// Returns an error if a handler is already registered for this push notification name. +// If protected is true, the handler cannot be unregistered. 
+func (c *SentinelClient) RegisterPushNotificationHandler(pushNotificationName string, handler push.NotificationHandler, protected bool) error { + return c.pushProcessor.RegisterHandler(pushNotificationName, handler, protected) +} + func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) @@ -311,13 +663,31 @@ func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error { func (c *SentinelClient) pubSub() *PubSub { pubsub := &PubSub{ opt: c.opt, - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - return c.newConn(ctx) + newConn: func(ctx context.Context, addr string, channels []string) (*pool.Conn, error) { + cn, err := c.pubSubPool.NewConn(ctx, c.opt.Network, addr, channels) + if err != nil { + return nil, err + } + // will return nil if already initialized + err = c.initConn(ctx, cn) + if err != nil { + _ = cn.Close() + return nil, err + } + // Track connection in PubSubPool + c.pubSubPool.TrackConn(cn) + return cn, nil }, - closeConn: c.connPool.CloseConn, + closeConn: func(cn *pool.Conn) error { + // Untrack connection from PubSubPool + c.pubSubPool.UntrackConn(cn) + _ = cn.Close() + return nil + }, + pushProcessor: c.pushProcessor, } pubsub.init() + return pubsub } @@ -452,10 +822,10 @@ type sentinelFailover struct { onFailover func(ctx context.Context, addr string) onUpdate func(ctx context.Context) - mu sync.RWMutex - _masterAddr string - sentinel *SentinelClient - pubsub *PubSub + mu sync.RWMutex + masterAddr string + sentinel *SentinelClient + pubsub *PubSub } func (c *sentinelFailover) Close() error { @@ -511,7 +881,7 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { if sentinel != nil { addr, err := c.getMasterAddr(ctx, sentinel) if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + if isContextError(ctx.Err()) { return "", err } // Continue on other errors @@ -529,7 +899,7 @@ 
func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { addr, err := c.getMasterAddr(ctx, c.sentinel) if err != nil { _ = c.closeSentinel() - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + if isContextError(ctx.Err()) { return "", err } // Continue on other errors @@ -540,29 +910,55 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { } } - for i, sentinelAddr := range c.sentinelAddrs { - sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) - - masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() - if err != nil { - _ = sentinel.Close() - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return "", err - } - internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s", - c.opt.MasterName, err) - continue - } - - // Push working sentinel to the top. - c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] - c.setSentinel(ctx, sentinel) - - addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) - return addr, nil + // short circuit if no sentinels configured + if len(c.sentinelAddrs) == 0 { + return "", errors.New("redis: no sentinels configured") } - return "", errors.New("redis: all sentinels specified in configuration are unreachable") + var ( + masterAddr string + wg sync.WaitGroup + once sync.Once + errCh = make(chan error, len(c.sentinelAddrs)) + ) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + for i, sentinelAddr := range c.sentinelAddrs { + wg.Add(1) + go func(i int, addr string) { + defer wg.Done() + sentinelCli := NewSentinelClient(c.opt.sentinelOptions(addr)) + addrVal, err := sentinelCli.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName addr=%s, master=%q failed: %s", + addr, c.opt.MasterName, err) + _ = sentinelCli.Close() + errCh <- err + return + } + 
once.Do(func() { + masterAddr = net.JoinHostPort(addrVal[0], addrVal[1]) + // Push working sentinel to the top + c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinelCli) + internal.Logger.Printf(ctx, "sentinel: selected addr=%s masterAddr=%s", addr, masterAddr) + cancel() + }) + }(i, sentinelAddr) + } + + wg.Wait() + close(errCh) + if masterAddr != "" { + return masterAddr, nil + } + errs := make([]error, 0, len(errCh)) + for err := range errCh { + errs = append(errs, err) + } + return "", fmt.Errorf("redis: all sentinels specified in configuration are unreachable: %w", errors.Join(errs...)) } func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) { @@ -573,7 +969,7 @@ func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected boo if sentinel != nil { addrs, err := c.getReplicaAddrs(ctx, sentinel) if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + if isContextError(ctx.Err()) { return nil, err } // Continue on other errors @@ -591,7 +987,7 @@ func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected boo addrs, err := c.getReplicaAddrs(ctx, c.sentinel) if err != nil { _ = c.closeSentinel() - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + if isContextError(ctx.Err()) { return nil, err } // Continue on other errors @@ -613,7 +1009,7 @@ func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected boo replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result() if err != nil { _ = sentinel.Close() - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + if isContextError(ctx.Err()) { return nil, err } internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s", @@ -682,7 +1078,7 @@ func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []strin func (c 
*sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { c.mu.RLock() - currentAddr := c._masterAddr //nolint:ifshort + currentAddr := c.masterAddr //nolint:ifshort c.mu.RUnlock() if addr == currentAddr { @@ -692,10 +1088,10 @@ func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { c.mu.Lock() defer c.mu.Unlock() - if addr == c._masterAddr { + if addr == c.masterAddr { return } - c._masterAddr = addr + c.masterAddr = addr internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q", c.opt.MasterName, addr) @@ -780,6 +1176,10 @@ func contains(slice []string, str string) bool { // NewFailoverClusterClient returns a client that supports routing read-only commands // to a replica node. func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { + if failoverOpt == nil { + panic("redis: NewFailoverClusterClient nil options") + } + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) copy(sentinelAddrs, failoverOpt.SentinelAddrs) @@ -789,6 +1189,22 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { } opt := failoverOpt.clusterOptions() + if failoverOpt.DB != 0 { + onConnect := opt.OnConnect + + opt.OnConnect = func(ctx context.Context, cn *Conn) error { + if err := cn.Select(ctx, failoverOpt.DB).Err(); err != nil { + return err + } + + if onConnect != nil { + return onConnect(ctx, cn) + } + + return nil + } + } + opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) { masterAddr, err := failover.MasterAddr(ctx) if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/set_commands.go b/vendor/github.com/redis/go-redis/v9/set_commands.go index cef8ad6d..2a465728 100644 --- a/vendor/github.com/redis/go-redis/v9/set_commands.go +++ b/vendor/github.com/redis/go-redis/v9/set_commands.go @@ -1,7 +1,13 @@ package redis -import "context" +import ( + "context" + "github.com/redis/go-redis/v9/internal/hashtag" +) + +// SetCmdable is an interface for Redis set 
commands. +// Sets are unordered collections of unique strings. type SetCmdable interface { SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd SCard(ctx context.Context, key string) *IntCmd @@ -25,8 +31,12 @@ type SetCmdable interface { SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd } -//------------------------------------------------------------------------------ - +// Returns the number of elements that were added to the set, not including all +// the elements already present in the set. +// +// For more information about the command please refer to [SADD]. +// +// [SADD]: (https://redis.io/docs/latest/commands/sadd/) func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "sadd" @@ -37,12 +47,25 @@ func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) * return cmd } +// Returns the set cardinality (number of elements) of the set stored at key. +// Returns 0 if key does not exist. +// +// For more information about the command please refer to [SCARD]. +// +// [SCARD]: (https://redis.io/docs/latest/commands/scard/) func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { cmd := NewIntCmd(ctx, "scard", key) _ = c(ctx, cmd) return cmd } +// Returns the members of the set resulting from the difference between the first set +// and all the successive sets. +// Keys that do not exist are considered to be empty sets. +// +// For more information about the command please refer to [SDIFF]. 
+// +// [SDIFF]: (https://redis.io/docs/latest/commands/sdiff/) func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)) args[0] = "sdiff" @@ -54,6 +77,13 @@ func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { return cmd } +// Stores the members of the set resulting from the difference between the first set +// and all the successive sets into destination. +// If destination already exists, it is overwritten. +// +// For more information about the command please refer to [SDIFFSTORE]. +// +// [SDIFFSTORE]: (https://redis.io/docs/latest/commands/sdiffstore/) func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sdiffstore" @@ -66,6 +96,13 @@ func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...str return cmd } +// Returns the members of the set resulting from the intersection of all the given sets. +// Keys that do not exist are considered to be empty sets. +// With one of the keys being an empty set, the resulting set is also empty. +// +// For more information about the command please refer to [SINTER]. +// +// [SINTER]: (https://redis.io/docs/latest/commands/sinter/) func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)) args[0] = "sinter" @@ -77,22 +114,38 @@ func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { return cmd } +// Returns the cardinality of the set resulting from the intersection of all the given sets. +// Keys that do not exist are considered to be empty sets. +// With one of the keys being an empty set, the resulting set is also empty. +// +// The limit parameter sets an upper bound on the number of results returned. +// If limit is 0, no limit is applied. +// +// For more information about the command please refer to [SINTERCARD]. 
+// +// [SINTERCARD]: (https://redis.io/docs/latest/commands/sintercard/) func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { - args := make([]interface{}, 4+len(keys)) + numKeys := len(keys) + args := make([]interface{}, 4+numKeys) args[0] = "sintercard" - numkeys := int64(0) + args[1] = numKeys for i, key := range keys { args[2+i] = key - numkeys++ } - args[1] = numkeys - args[2+numkeys] = "limit" - args[3+numkeys] = limit + args[2+numKeys] = "limit" + args[3+numKeys] = limit cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } +// Stores the members of the set resulting from the intersection of all the given sets +// into destination. +// If destination already exists, it is overwritten. +// +// For more information about the command please refer to [SINTERSTORE]. +// +// [SINTERSTORE]: (https://redis.io/docs/latest/commands/sinterstore/) func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sinterstore" @@ -105,13 +158,26 @@ func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...st return cmd } +// Returns if member is a member of the set stored at key. +// Returns true if the element is a member of the set, false if it is not a member +// or if key does not exist. +// +// For more information about the command please refer to [SISMEMBER]. +// +// [SISMEMBER]: (https://redis.io/docs/latest/commands/sismember/) func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { cmd := NewBoolCmd(ctx, "sismember", key, member) _ = c(ctx, cmd) return cmd } -// SMIsMember Redis `SMISMEMBER key member [member ...]` command. +// Returns whether each member is a member of the set stored at key. +// For each member, returns true if the element is a member of the set, false if it is not +// a member or if key does not exist. 
+// +// For more information about the command please refer to [SMISMEMBER]. +// +// [SMISMEMBER]: (https://redis.io/docs/latest/commands/smismember/) func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "smismember" @@ -122,54 +188,100 @@ func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interfac return cmd } -// SMembers Redis `SMEMBERS key` command output as a slice. +// Returns all the members of the set value stored at key. +// Returns an empty slice if key does not exist. +// +// For more information about the command please refer to [SMEMBERS]. +// +// [SMEMBERS]: (https://redis.io/docs/latest/commands/smembers/) func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "smembers", key) _ = c(ctx, cmd) return cmd } -// SMembersMap Redis `SMEMBERS key` command output as a map. +// Returns all the members of the set value stored at key as a map. +// Returns an empty map if key does not exist. +// +// For more information about the command please refer to [SMEMBERS]. +// +// [SMEMBERS]: (https://redis.io/docs/latest/commands/smembers/) func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { cmd := NewStringStructMapCmd(ctx, "smembers", key) _ = c(ctx, cmd) return cmd } +// Moves member from the set at source to the set at destination. +// This operation is atomic. In every given moment the element will appear to be a member +// of source or destination for other clients. +// +// For more information about the command please refer to [SMOVE]. +// +// [SMOVE]: (https://redis.io/docs/latest/commands/smove/) func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { cmd := NewBoolCmd(ctx, "smove", source, destination, member) _ = c(ctx, cmd) return cmd } -// SPop Redis `SPOP key` command. 
+// Removes and returns one or more random members from the set value stored at key. +// This version returns a single random member. +// +// For more information about the command please refer to [SPOP]. +// +// [SPOP]: (https://redis.io/docs/latest/commands/spop/) func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "spop", key) _ = c(ctx, cmd) return cmd } -// SPopN Redis `SPOP key count` command. +// Removes and returns one or more random members from the set value stored at key. +// This version returns up to count random members. +// +// For more information about the command please refer to [SPOP]. +// +// [SPOP]: (https://redis.io/docs/latest/commands/spop/) func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "spop", key, count) _ = c(ctx, cmd) return cmd } -// SRandMember Redis `SRANDMEMBER key` command. +// Returns a random member from the set value stored at key. +// This version returns a single random member without removing it. +// +// For more information about the command please refer to [SRANDMEMBER]. +// +// [SRANDMEMBER]: (https://redis.io/docs/latest/commands/srandmember/) func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "srandmember", key) _ = c(ctx, cmd) return cmd } -// SRandMemberN Redis `SRANDMEMBER key count` command. +// Returns an array of random members from the set value stored at key. +// This version returns up to count random members without removing them. +// When called with a positive count, returns distinct elements. +// When called with a negative count, allows for repeated elements. +// +// For more information about the command please refer to [SRANDMEMBER]. 
+// +// [SRANDMEMBER]: (https://redis.io/docs/latest/commands/srandmember/) func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "srandmember", key, count) _ = c(ctx, cmd) return cmd } +// Removes the specified members from the set stored at key. +// Specified members that are not a member of this set are ignored. +// If key does not exist, it is treated as an empty set and this command returns 0. +// +// For more information about the command please refer to [SREM]. +// +// [SREM]: (https://redis.io/docs/latest/commands/srem/) func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "srem" @@ -180,6 +292,12 @@ func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) * return cmd } +// Returns the members of the set resulting from the union of all the given sets. +// Keys that do not exist are considered to be empty sets. +// +// For more information about the command please refer to [SUNION]. +// +// [SUNION]: (https://redis.io/docs/latest/commands/sunion/) func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)) args[0] = "sunion" @@ -191,6 +309,13 @@ func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { return cmd } +// Stores the members of the set resulting from the union of all the given sets +// into destination. +// If destination already exists, it is overwritten. +// +// For more information about the command please refer to [SUNIONSTORE]. 
+// +// [SUNIONSTORE]: (https://redis.io/docs/latest/commands/sunionstore/) func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sunionstore" @@ -203,6 +328,17 @@ func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...st return cmd } +// Incrementally iterates the set elements stored at key. +// This is a cursor-based iterator that allows scanning large sets efficiently. +// +// Parameters: +// - cursor: The cursor value for the iteration (use 0 to start a new scan) +// - match: Optional pattern to match elements (empty string means no pattern) +// - count: Optional hint about how many elements to return per iteration +// +// For more information about the command please refer to [SSCAN]. +// +// [SSCAN]: (https://redis.io/docs/latest/commands/sscan/) func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { args := []interface{}{"sscan", key, cursor} if match != "" { @@ -212,6 +348,9 @@ func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match str args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) 
+ if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go index 67014027..4a6c8f13 100644 --- a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go +++ b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go @@ -2,8 +2,11 @@ package redis import ( "context" + "errors" "strings" "time" + + "github.com/redis/go-redis/v9/internal/hashtag" ) type SortedSetCmdable interface { @@ -257,16 +260,15 @@ func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd } func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { - args := make([]interface{}, 4+len(keys)) + numKeys := len(keys) + args := make([]interface{}, 4+numKeys) args[0] = "zintercard" - numkeys := int64(0) + args[1] = numKeys for i, key := range keys { args[2+i] = key - numkeys++ } - args[1] = numkeys - args[2+numkeys] = "limit" - args[3+numkeys] = limit + args[2+numKeys] = "limit" + args[3+numKeys] = limit cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd @@ -312,7 +314,9 @@ func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSlic case 1: args = append(args, count[0]) default: - panic("too many arguments") + cmd := NewZSliceCmd(ctx) + cmd.SetErr(errors.New("too many arguments")) + return cmd } cmd := NewZSliceCmd(ctx, args...) @@ -332,7 +336,9 @@ func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSlic case 1: args = append(args, count[0]) default: - panic("too many arguments") + cmd := NewZSliceCmd(ctx) + cmd.SetErr(errors.New("too many arguments")) + return cmd } cmd := NewZSliceCmd(ctx, args...) @@ -473,10 +479,16 @@ func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, return cmd } +// ZRangeByScore returns members in a sorted set within a range of scores. 
+// +// Deprecated: Use ZRangeArgs with ByScore option instead as of Redis 6.2.0. func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) } +// ZRangeByLex returns members in a sorted set within a lexicographical range. +// +// Deprecated: Use ZRangeArgs with ByLex option instead as of Redis 6.2.0. func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { return c.zRangeBy(ctx, "zrangebylex", key, opt, false) } @@ -553,6 +565,9 @@ func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntC return cmd } +// ZRevRange returns members in a sorted set within a range of indexes in reverse order. +// +// Deprecated: Use ZRangeArgs with Rev option instead as of Redis 6.2.0. func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) _ = c(ctx, cmd) @@ -582,10 +597,16 @@ func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeB return cmd } +// ZRevRangeByScore returns members in a sorted set within a range of scores in reverse order. +// +// Deprecated: Use ZRangeArgs with Rev and ByScore options instead as of Redis 6.2.0. func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) } +// ZRevRangeByLex returns members in a sorted set within a lexicographical range in reverse order. +// +// Deprecated: Use ZRangeArgs with Rev and ByLex options instead as of Redis 6.2.0. func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) } @@ -720,6 +741,9 @@ func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match str args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) 
+ if hashtag.Present(match) { + cmd.SetFirstKeyPos(4) + } _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/stream_commands.go b/vendor/github.com/redis/go-redis/v9/stream_commands.go index 0a986920..89ae6a1b 100644 --- a/vendor/github.com/redis/go-redis/v9/stream_commands.go +++ b/vendor/github.com/redis/go-redis/v9/stream_commands.go @@ -2,12 +2,18 @@ package redis import ( "context" + "strconv" + "strings" "time" + + "github.com/redis/go-redis/v9/internal/otel" ) type StreamCmdable interface { XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XAckDel(ctx context.Context, stream string, group string, mode string, ids ...string) *SliceCmd XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XDelEx(ctx context.Context, stream string, mode string, ids ...string) *SliceCmd XLen(ctx context.Context, stream string) *IntCmd XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd @@ -31,12 +37,17 @@ type StreamCmdable interface { XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd + XTrimMaxLenMode(ctx context.Context, key string, maxLen int64, mode string) *IntCmd + XTrimMaxLenApproxMode(ctx context.Context, key string, maxLen, limit int64, mode string) *IntCmd XTrimMinID(ctx context.Context, key string, minID string) *IntCmd XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd + XTrimMinIDMode(ctx context.Context, key string, minID string, mode string) *IntCmd + XTrimMinIDApproxMode(ctx context.Context, key string, minID string, limit int64, mode string) *IntCmd XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd XInfoStream(ctx context.Context, key string) *XInfoStreamCmd XInfoStreamFull(ctx context.Context, key 
string, count int) *XInfoStreamFullCmd XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd + XCfgSet(ctx context.Context, a *XCfgSetArgs) *StatusCmd } // XAddArgs accepts values in the following formats: @@ -46,41 +57,69 @@ type StreamCmdable interface { // // Note that map will not preserve the order of key-value pairs. // MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used. +// +// For idempotent production (at-most-once production): +// - ProducerID: A unique identifier for the producer (required for both IDMP and IDMPAUTO) +// - IdempotentID: A unique identifier for the message (used with IDMP) +// - IdempotentAuto: If true, Redis will auto-generate an idempotent ID based on message content (IDMPAUTO) +// +// ProducerID and IdempotentID are mutually exclusive with IdempotentAuto. +// When using idempotent production, ID must be "*" or empty. type XAddArgs struct { Stream string NoMkStream bool MaxLen int64 // MAXLEN N MinID string // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). 
- Approx bool - Limit int64 - ID string - Values interface{} + Approx bool + Limit int64 + Mode string + ID string + Values interface{} + ProducerID string // Producer ID for idempotent production (IDMP or IDMPAUTO) + IdempotentID string // Idempotent ID for IDMP + IdempotentAuto bool // Use IDMPAUTO to auto-generate idempotent ID based on content } func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { - args := make([]interface{}, 0, 11) + args := make([]interface{}, 0, 15) args = append(args, "xadd", a.Stream) if a.NoMkStream { args = append(args, "nomkstream") } + + if a.Mode != "" { + args = append(args, a.Mode) + } + + if a.ProducerID != "" { + if a.IdempotentAuto { + // IDMPAUTO pid + args = append(args, "idmpauto", a.ProducerID) + } else if a.IdempotentID != "" { + // IDMP pid iid + args = append(args, "idmp", a.ProducerID, a.IdempotentID) + } + } + switch { case a.MaxLen > 0: if a.Approx { args = append(args, "maxlen", "~", a.MaxLen) } else { - args = append(args, "maxlen", a.MaxLen) + args = append(args, "maxlen", "=", a.MaxLen) } case a.MinID != "": if a.Approx { args = append(args, "minid", "~", a.MinID) } else { - args = append(args, "minid", a.MinID) + args = append(args, "minid", "=", a.MinID) } } if a.Limit > 0 { args = append(args, "limit", a.Limit) } + if a.ID != "" { args = append(args, a.ID) } else { @@ -93,6 +132,16 @@ func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { return cmd } +func (c cmdable) XAckDel(ctx context.Context, stream string, group string, mode string, ids ...string) *SliceCmd { + args := []interface{}{"xackdel", stream, group, mode, "ids", len(ids)} + for _, id := range ids { + args = append(args, id) + } + cmd := NewSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { args := []interface{}{"xdel", stream} for _, id := range ids { @@ -103,6 +152,16 @@ func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd return cmd } +func (c cmdable) XDelEx(ctx context.Context, stream string, mode string, ids ...string) *SliceCmd { + args := []interface{}{"xdelex", stream, mode, "ids", len(ids)} + for _, id := range ids { + args = append(args, id) + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { cmd := NewIntCmd(ctx, "xlen", stream) _ = c(ctx, cmd) @@ -137,10 +196,11 @@ type XReadArgs struct { Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 Count int64 Block time.Duration + ID string } func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 6+len(a.Streams)) + args := make([]interface{}, 0, 2*len(a.Streams)+6) args = append(args, "xread") keyPos := int8(1) @@ -159,6 +219,11 @@ func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { for _, s := range a.Streams { args = append(args, s) } + if a.ID != "" { + for range a.Streams { + args = append(args, a.ID) + } + } cmd := NewXStreamSliceCmd(ctx, args...) 
if a.Block >= 0 { @@ -178,36 +243,42 @@ func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSl func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -219,6 +290,7 @@ type XReadGroupArgs struct { Count int64 Block time.Duration NoAck bool + Claim time.Duration // Claim idle pending entries older than this duration } func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { @@ -238,6 +310,10 @@ func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSlic args = append(args, "noack") keyPos++ } + if a.Claim > 0 { + args = append(args, "claim", int64(a.Claim/time.Millisecond)) + keyPos += 2 + } args = append(args, "streams") keyPos++ for _, s := range a.Streams { @@ 
-250,6 +326,26 @@ func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSlic } cmd.SetFirstKeyPos(keyPos) _ = c(ctx, cmd) + + // Record stream lag for each message (if command succeeded) + if cmd.Err() == nil { + streams := cmd.Val() + for _, stream := range streams { + for _, msg := range stream.Messages { + // Parse message ID to extract timestamp (format: "millisecondsTime-sequenceNumber") + if parts := strings.SplitN(msg.ID, "-", 2); len(parts) == 2 { + if timestampMs, err := strconv.ParseInt(parts[0], 10, 64); err == nil { + // Calculate lag (time since message was created) + messageTime := time.Unix(0, timestampMs*int64(time.Millisecond)) + lag := time.Since(messageTime) + // Record lag metric + otel.RecordStreamLag(ctx, lag, nil, stream.Stream, a.Group, a.Consumer) + } + } + } + } + } + return cmd } @@ -363,6 +459,8 @@ func xClaimArgs(a *XClaimArgs) []interface{} { return args } +// TODO: refactor xTrim, xTrimMode and the wrappers over the functions + // xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). // example: // @@ -378,6 +476,8 @@ func (c cmdable) xTrim( args = append(args, "xtrim", key, strategy) if approx { args = append(args, "~") + } else { + args = append(args, "=") } args = append(args, threshold) if limit > 0 { @@ -406,6 +506,44 @@ func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, return c.xTrim(ctx, key, "minid", true, minID, limit) } +func (c cmdable) xTrimMode( + ctx context.Context, key, strategy string, + approx bool, threshold interface{}, limit int64, + mode string, +) *IntCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xtrim", key, strategy) + if approx { + args = append(args, "~") + } else { + args = append(args, "=") + } + args = append(args, threshold) + if limit > 0 { + args = append(args, "limit", limit) + } + args = append(args, mode) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XTrimMaxLenMode(ctx context.Context, key string, maxLen int64, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "maxlen", false, maxLen, 0, mode) +} + +func (c cmdable) XTrimMaxLenApproxMode(ctx context.Context, key string, maxLen, limit int64, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "maxlen", true, maxLen, limit, mode) +} + +func (c cmdable) XTrimMinIDMode(ctx context.Context, key string, minID string, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "minid", false, minID, 0, mode) +} + +func (c cmdable) XTrimMinIDApproxMode(ctx context.Context, key string, minID string, limit int64, mode string) *IntCmd { + return c.xTrimMode(ctx, key, "minid", true, minID, limit, mode) +} + func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { cmd := NewXInfoConsumersCmd(ctx, key, group) _ = c(ctx, cmd) @@ -436,3 +574,28 @@ func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XI _ = c(ctx, cmd) return cmd } + +// XCfgSetArgs represents the arguments for the XCFGSET command. +// Duration is the duration, in seconds, that Redis keeps each idempotent ID. +// MaxSize is the maximum number of most recent idempotent IDs that Redis keeps for each producer ID. +type XCfgSetArgs struct { + Stream string + Duration int64 + MaxSize int64 +} + +// XCfgSet sets the idempotent production configuration for a stream. +// XCFGSET key [IDMP-DURATION duration] [IDMP-MAXSIZE maxsize] +func (c cmdable) XCfgSet(ctx context.Context, a *XCfgSetArgs) *StatusCmd { + args := make([]interface{}, 0, 6) + args = append(args, "xcfgset", a.Stream) + if a.Duration > 0 { + args = append(args, "idmp-duration", a.Duration) + } + if a.MaxSize > 0 { + args = append(args, "idmp-maxsize", a.MaxSize) + } + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/string_commands.go b/vendor/github.com/redis/go-redis/v9/string_commands.go index eff5880d..f69d3d05 100644 --- a/vendor/github.com/redis/go-redis/v9/string_commands.go +++ b/vendor/github.com/redis/go-redis/v9/string_commands.go @@ -2,6 +2,7 @@ package redis import ( "context" + "fmt" "time" ) @@ -9,6 +10,8 @@ type StringCmdable interface { Append(ctx context.Context, key, value string) *IntCmd Decr(ctx context.Context, key string) *IntCmd DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + DelExArgs(ctx context.Context, key string, a DelExArgs) *IntCmd + Digest(ctx context.Context, key string) *DigestCmd Get(ctx context.Context, key string) *StringCmd GetRange(ctx context.Context, key string, start, end int64) *StringCmd GetSet(ctx context.Context, key string, value interface{}) *StringCmd @@ -21,9 +24,18 @@ type StringCmdable interface { MGet(ctx context.Context, keys ...string) *SliceCmd MSet(ctx context.Context, values ...interface{}) *StatusCmd MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + MSetEX(ctx context.Context, args MSetEXArgs, values ...interface{}) *IntCmd Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetIFEQ(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd + SetIFEQGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd + SetIFNE(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd + SetIFNEGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd + SetIFDEQ(ctx 
context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd + SetIFDEQGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd + SetIFDNE(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd + SetIFDNEGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd @@ -48,6 +60,76 @@ func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCm return cmd } +// DelExArgs provides arguments for the DelExArgs function. +type DelExArgs struct { + // Mode can be `IFEQ`, `IFNE`, `IFDEQ`, or `IFDNE`. + Mode string + + // MatchValue is used with IFEQ/IFNE modes for compare-and-delete operations. + // - IFEQ: only delete if current value equals MatchValue + // - IFNE: only delete if current value does not equal MatchValue + MatchValue interface{} + + // MatchDigest is used with IFDEQ/IFDNE modes for digest-based compare-and-delete. + // - IFDEQ: only delete if current value's digest equals MatchDigest + // - IFDNE: only delete if current value's digest does not equal MatchDigest + // + // The digest is a uint64 xxh3 hash value. + // + // For examples of client-side digest generation, see: + // example/digest-optimistic-locking/ + MatchDigest uint64 +} + +// DelExArgs Redis `DELEX key [IFEQ|IFNE|IFDEQ|IFDNE] match-value` command. +// Compare-and-delete with flexible conditions. +// +// Returns the number of keys that were removed (0 or 1). 
+// +// NOTE DelExArgs is still experimental +// it's signature and behaviour may change +func (c cmdable) DelExArgs(ctx context.Context, key string, a DelExArgs) *IntCmd { + args := []interface{}{"delex", key} + + if a.Mode != "" { + args = append(args, a.Mode) + + // Add match value/digest based on mode + switch a.Mode { + case "ifeq", "IFEQ", "ifne", "IFNE": + if a.MatchValue != nil { + args = append(args, a.MatchValue) + } + case "ifdeq", "IFDEQ", "ifdne", "IFDNE": + if a.MatchDigest != 0 { + args = append(args, fmt.Sprintf("%016x", a.MatchDigest)) + } + } + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Digest returns the xxh3 hash (uint64) of the specified key's value. +// +// The digest is a 64-bit xxh3 hash that can be used for optimistic locking +// with SetIFDEQ, SetIFDNE, and DelExArgs commands. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. See https://redis.io/commands/digest/ +// +// NOTE Digest is still experimental +// it's signature and behaviour may change +func (c cmdable) Digest(ctx context.Context, key string) *DigestCmd { + cmd := NewDigestCmd(ctx, "digest", key) + _ = c(ctx, cmd) + return cmd +} + // Get Redis `GET key` command. It returns redis.Nil error when key does not exist. func (c cmdable) Get(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "get", key) @@ -61,6 +143,9 @@ func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *St return cmd } +// GetSet returns the old value stored at key and sets it to the new value. +// +// Deprecated: Use SetArgs with Get option instead as of Redis 6.2.0. 
func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { cmd := NewStringCmd(ctx, "getset", key, value) _ = c(ctx, cmd) @@ -112,6 +197,35 @@ func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *Fl return cmd } +type SetCondition string + +const ( + // NX only set the keys and their expiration if none exist + NX SetCondition = "NX" + // XX only set the keys and their expiration if all already exist + XX SetCondition = "XX" +) + +type ExpirationMode string + +const ( + // EX sets expiration in seconds + EX ExpirationMode = "EX" + // PX sets expiration in milliseconds + PX ExpirationMode = "PX" + // EXAT sets expiration as Unix timestamp in seconds + EXAT ExpirationMode = "EXAT" + // PXAT sets expiration as Unix timestamp in milliseconds + PXAT ExpirationMode = "PXAT" + // KEEPTTL keeps the existing TTL + KEEPTTL ExpirationMode = "KEEPTTL" +) + +type ExpirationOption struct { + Mode ExpirationMode + Value int64 +} + func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd { cmd := NewLCSCmd(ctx, q) _ = c(ctx, cmd) @@ -157,6 +271,49 @@ func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { return cmd } +type MSetEXArgs struct { + Condition SetCondition + Expiration *ExpirationOption +} + +// MSetEX sets the given keys to their respective values. +// This command is an extension of the MSETNX that adds expiration and XX options. +// Available since Redis 8.4 +// Important: When this method is used with Cluster clients, all keys +// must be in the same hash slot, otherwise CROSSSLOT error will be returned. 
+// For more information, see https://redis.io/commands/msetex +func (c cmdable) MSetEX(ctx context.Context, args MSetEXArgs, values ...interface{}) *IntCmd { + expandedArgs := appendArgs([]interface{}{}, values) + numkeys := len(expandedArgs) / 2 + + cmdArgs := make([]interface{}, 0, 2+len(expandedArgs)+3) + cmdArgs = append(cmdArgs, "msetex", numkeys) + cmdArgs = append(cmdArgs, expandedArgs...) + + if args.Condition != "" { + cmdArgs = append(cmdArgs, string(args.Condition)) + } + + if args.Expiration != nil { + switch args.Expiration.Mode { + case EX: + cmdArgs = append(cmdArgs, "ex", args.Expiration.Value) + case PX: + cmdArgs = append(cmdArgs, "px", args.Expiration.Value) + case EXAT: + cmdArgs = append(cmdArgs, "exat", args.Expiration.Value) + case PXAT: + cmdArgs = append(cmdArgs, "pxat", args.Expiration.Value) + case KEEPTTL: + cmdArgs = append(cmdArgs, "keepttl") + } + } + + cmd := NewIntCmd(ctx, cmdArgs...) + _ = c(ctx, cmd) + return cmd +} + // Set Redis `SET key value [expiration]` command. // Use expiration for `SETEx`-like behavior. // @@ -185,9 +342,24 @@ func (c cmdable) Set(ctx context.Context, key string, value interface{}, expirat // SetArgs provides arguments for the SetArgs function. type SetArgs struct { - // Mode can be `NX` or `XX` or empty. + // Mode can be `NX`, `XX`, `IFEQ`, `IFNE`, `IFDEQ`, `IFDNE` or empty. Mode string + // MatchValue is used with IFEQ/IFNE modes for compare-and-set operations. + // - IFEQ: only set if current value equals MatchValue + // - IFNE: only set if current value does not equal MatchValue + MatchValue interface{} + + // MatchDigest is used with IFDEQ/IFDNE modes for digest-based compare-and-set. + // - IFDEQ: only set if current value's digest equals MatchDigest + // - IFDNE: only set if current value's digest does not equal MatchDigest + // + // The digest is a uint64 xxh3 hash value. 
+ // + // For examples of client-side digest generation, see: + // example/digest-optimistic-locking/ + MatchDigest uint64 + // Zero `TTL` or `Expiration` means that the key has no expiration time. TTL time.Duration ExpireAt time.Time @@ -223,6 +395,18 @@ func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a S if a.Mode != "" { args = append(args, a.Mode) + + // Add match value/digest for CAS modes + switch a.Mode { + case "ifeq", "IFEQ", "ifne", "IFNE": + if a.MatchValue != nil { + args = append(args, a.MatchValue) + } + case "ifdeq", "IFDEQ", "ifdne", "IFDNE": + if a.MatchDigest != 0 { + args = append(args, fmt.Sprintf("%016x", a.MatchDigest)) + } + } } if a.Get { @@ -234,14 +418,18 @@ func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a S return cmd } -// SetEx Redis `SETEx key expiration value` command. +// SetEx sets the value and expiration of a key. +// +// Deprecated: Use Set with expiration instead as of Redis 2.6.12. func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) _ = c(ctx, cmd) return cmd } -// SetNX Redis `SET key value [expiration] NX` command. +// SetNX sets the value of a key only if the key does not exist. +// +// Deprecated: Use Set with NX option instead as of Redis 2.6.12. // // Zero expiration means the key has no expiration time. // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, @@ -290,6 +478,270 @@ func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expir return cmd } +// SetIFEQ Redis `SET key value [expiration] IFEQ match-value` command. +// Compare-and-set: only sets the value if the current value equals matchValue. +// +// Returns "OK" on success. +// Returns nil if the operation was aborted due to condition not matching. 
+// Zero expiration means the key has no expiration time. +// +// NOTE SetIFEQ is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFEQ(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifeq", matchValue) + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFEQGet Redis `SET key value [expiration] IFEQ match-value GET` command. +// Compare-and-set with GET: only sets the value if the current value equals matchValue, +// and returns the previous value. +// +// Returns the previous value on success. +// Returns nil if the operation was aborted due to condition not matching. +// Zero expiration means the key has no expiration time. +// +// NOTE SetIFEQGet is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFEQGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifeq", matchValue, "get") + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFNE Redis `SET key value [expiration] IFNE match-value` command. +// Compare-and-set: only sets the value if the current value does not equal matchValue. +// +// Returns "OK" on success. 
+// Returns nil if the operation was aborted due to condition not matching. +// Zero expiration means the key has no expiration time. +// +// NOTE SetIFNE is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFNE(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StatusCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifne", matchValue) + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFNEGet Redis `SET key value [expiration] IFNE match-value GET` command. +// Compare-and-set with GET: only sets the value if the current value does not equal matchValue, +// and returns the previous value. +// +// Returns the previous value on success. +// Returns nil if the operation was aborted due to condition not matching. +// Zero expiration means the key has no expiration time. +// +// NOTE SetIFNEGet is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFNEGet(ctx context.Context, key string, value interface{}, matchValue interface{}, expiration time.Duration) *StringCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifne", matchValue, "get") + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFDEQ sets the value only if the current value's digest equals matchDigest. +// +// This is a compare-and-set operation using xxh3 digest for optimistic locking. 
+// The matchDigest parameter is a uint64 xxh3 hash value. +// +// Returns "OK" on success. +// Returns redis.Nil if the digest doesn't match (value was modified). +// Zero expiration means the key has no expiration time. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. See https://redis.io/commands/set/ +// +// NOTE SetIFNEQ is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFDEQ(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifdeq", fmt.Sprintf("%016x", matchDigest)) + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFDEQGet sets the value only if the current value's digest equals matchDigest, +// and returns the previous value. +// +// This is a compare-and-set operation using xxh3 digest for optimistic locking. +// The matchDigest parameter is a uint64 xxh3 hash value. +// +// Returns the previous value on success. +// Returns redis.Nil if the digest doesn't match (value was modified). +// Zero expiration means the key has no expiration time. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. 
See https://redis.io/commands/set/ +// +// NOTE SetIFNEQGet is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFDEQGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifdeq", fmt.Sprintf("%016x", matchDigest), "get") + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetIFDNE sets the value only if the current value's digest does NOT equal matchDigest. +// +// This is a compare-and-set operation using xxh3 digest for optimistic locking. +// The matchDigest parameter is a uint64 xxh3 hash value. +// +// Returns "OK" on success (digest didn't match, value was set). +// Returns redis.Nil if the digest matches (value was not modified). +// Zero expiration means the key has no expiration time. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. See https://redis.io/commands/set/ +// +// NOTE SetIFDNE is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFDNE(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StatusCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifdne", fmt.Sprintf("%016x", matchDigest)) + + cmd := NewStatusCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// SetIFDNEGet sets the value only if the current value's digest does NOT equal matchDigest, +// and returns the previous value. +// +// This is a compare-and-set operation using xxh3 digest for optimistic locking. +// The matchDigest parameter is a uint64 xxh3 hash value. +// +// Returns the previous value on success (digest didn't match, value was set). +// Returns redis.Nil if the digest matches (value was not modified). +// Zero expiration means the key has no expiration time. +// +// For examples of client-side digest generation and usage patterns, see: +// example/digest-optimistic-locking/ +// +// Redis 8.4+. See https://redis.io/commands/set/ +// +// NOTE SetIFDNEGet is still experimental +// it's signature and behaviour may change +func (c cmdable) SetIFDNEGet(ctx context.Context, key string, value interface{}, matchDigest uint64, expiration time.Duration) *StringCmd { + args := []interface{}{"set", key, value} + + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + args = append(args, "ifdne", fmt.Sprintf("%016x", matchDigest), "get") + + cmd := NewStringCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { cmd := NewIntCmd(ctx, "setrange", key, offset, value) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go index 6f1b2fa4..15d80168 100644 --- a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go +++ b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go @@ -2,9 +2,9 @@ package redis import ( "context" - "strconv" "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/util" ) type TimeseriesCmdable interface { @@ -40,25 +40,32 @@ type TimeseriesCmdable interface { } type TSOptions struct { - Retention int - ChunkSize int - Encoding string - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + Encoding string + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSIncrDecrOptions struct { - Timestamp int64 - Retention int - ChunkSize int - Uncompressed bool - Labels map[string]string + Timestamp int64 + Retention int + ChunkSize int + Uncompressed bool + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSAlterOptions struct { - Retention int - ChunkSize int - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSCreateRuleOptions struct { @@ -89,6 +96,8 @@ const ( VarP VarS Twa + CountNaN + CountAll ) func (a Aggregator) String() string { @@ -121,6 +130,10 @@ func (a Aggregator) String() string { return "VAR.S" case Twa: return "TWA" + case CountNaN: + return "COUNTNAN" + case CountAll: + return "COUNTALL" default: return "" } @@ -223,6 +236,9 @@ func (c cmdable) TSAddWithArgs(ctx context.Context, key string, 
timestamp interf args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -264,6 +280,9 @@ func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOp args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -292,6 +311,9 @@ func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOption args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -351,12 +373,18 @@ func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) 
_ = c(ctx, cmd) @@ -391,12 +419,18 @@ func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -458,8 +492,9 @@ type TSTimestampValueCmd struct { func newTSTimestampValueCmd(ctx context.Context, args ...interface{}) *TSTimestampValueCmd { return &TSTimestampValueCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeTSTimestampValue, }, } } @@ -496,7 +531,7 @@ func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) { return err } cmd.val.Timestamp = timestamp - cmd.val.Value, err = strconv.ParseFloat(value, 64) + cmd.val.Value, err = util.ParseStringToFloat(value) if err != nil { return err } @@ -505,6 +540,13 @@ func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *TSTimestampValueCmd) Clone() Cmder { + return &TSTimestampValueCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: cmd.val, // TSTimestampValue is a simple struct, can be copied directly + } +} + // TSInfo - Returns information about a time-series key. 
// For more information - https://redis.io/commands/ts.info/ func (c cmdable) TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd { @@ -676,8 +718,9 @@ type TSTimestampValueSliceCmd struct { func newTSTimestampValueSliceCmd(ctx context.Context, args ...interface{}) *TSTimestampValueSliceCmd { return &TSTimestampValueSliceCmd{ baseCmd: baseCmd{ - ctx: ctx, - args: args, + ctx: ctx, + args: args, + cmdType: CmdTypeTSTimestampValueSlice, }, } } @@ -715,7 +758,7 @@ func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) { return err } cmd.val[i].Timestamp = timestamp - cmd.val[i].Value, err = strconv.ParseFloat(value, 64) + cmd.val[i].Value, err = util.ParseStringToFloat(value) if err != nil { return err } @@ -724,6 +767,18 @@ func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) { return nil } +func (cmd *TSTimestampValueSliceCmd) Clone() Cmder { + var val []TSTimestampValue + if cmd.val != nil { + val = make([]TSTimestampValue, len(cmd.val)) + copy(val, cmd.val) + } + return &TSTimestampValueSliceCmd{ + baseCmd: cmd.cloneBaseCmd(), + val: val, + } +} + // TSMRange - Returns a range of samples from multiple time-series keys. 
// For more information - https://redis.io/commands/ts.mrange/ func (c cmdable) TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd { diff --git a/vendor/github.com/redis/go-redis/v9/tx.go b/vendor/github.com/redis/go-redis/v9/tx.go index 039eaf35..40bc1d66 100644 --- a/vendor/github.com/redis/go-redis/v9/tx.go +++ b/vendor/github.com/redis/go-redis/v9/tx.go @@ -19,16 +19,16 @@ type Tx struct { baseClient cmdable statefulCmdable - hooksMixin } func (c *Client) newTx() *Tx { tx := Tx{ baseClient: baseClient{ - opt: c.opt, - connPool: pool.NewStickyConnPool(c.connPool), + opt: c.opt.clone(), // Clone options to avoid sharing mutable state between transaction and parent client + connPool: pool.NewStickyConnPool(c.connPool), + hooksMixin: c.hooksMixin.clone(), + pushProcessor: c.pushProcessor, // Copy push processor from parent client }, - hooksMixin: c.hooksMixin.clone(), } tx.init() return &tx diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go index 275bef3d..2531cb59 100644 --- a/vendor/github.com/redis/go-redis/v9/universal.go +++ b/vendor/github.com/redis/go-redis/v9/universal.go @@ -5,6 +5,10 @@ import ( "crypto/tls" "net" "time" + + "github.com/redis/go-redis/v9/auth" + "github.com/redis/go-redis/v9/maintnotifications" + "github.com/redis/go-redis/v9/push" ) // UniversalOptions information is required by UniversalClient to establish @@ -26,9 +30,27 @@ type UniversalOptions struct { Dialer func(ctx context.Context, network, addr string) (net.Conn, error) OnConnect func(ctx context.Context, cn *Conn) error - Protocol int - Username string - Password string + Protocol int + Username string + Password string + // CredentialsProvider allows the username and password to be updated + // before reconnecting. It should return the current username and password. 
+ CredentialsProvider func() (username string, password string) + + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + + // StreamingCredentialsProvider is used to retrieve the credentials + // for the connection from an external source. Those credentials may change + // during the connection lifetime. This is useful for managed identity + // scenarios where the credentials are retrieved from an external source. + // + // Currently, this is a placeholder for the future implementation. + StreamingCredentialsProvider auth.StreamingCredentialsProvider + SentinelUsername string SentinelPassword string @@ -36,21 +58,52 @@ type UniversalOptions struct { MinRetryBackoff time.Duration MaxRetryBackoff time.Duration - DialTimeout time.Duration + DialTimeout time.Duration + + // DialerRetries is the maximum number of retry attempts when dialing fails. + // + // default: 5 + DialerRetries int + + // DialerRetryTimeout is the backoff duration between retry attempts. + // + // default: 100 milliseconds + DialerRetryTimeout time.Duration + ReadTimeout time.Duration WriteTimeout time.Duration ContextTimeoutEnabled bool + // ReadBufferSize is the size of the bufio.Reader buffer for each connection. + // Larger buffers can improve performance for commands that return large responses. + // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + ReadBufferSize int + + // WriteBufferSize is the size of the bufio.Writer buffer for each connection. + // Larger buffers can improve performance for large pipelines and commands with many arguments. 
+ // Smaller buffers can improve memory usage for larger pools. + // + // default: 32KiB (32768 bytes) + WriteBufferSize int + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). PoolFIFO bool - PoolSize int - PoolTimeout time.Duration - MinIdleConns int - MaxIdleConns int - MaxActiveConns int - ConnMaxIdleTime time.Duration - ConnMaxLifetime time.Duration + PoolSize int + + // MaxConcurrentDials is the maximum number of concurrent connection creation goroutines. + // If <= 0, defaults to PoolSize. If > PoolSize, it will be capped at PoolSize. + MaxConcurrentDials int + + PoolTimeout time.Duration + MinIdleConns int + MaxIdleConns int + MaxActiveConns int + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration + ConnMaxLifetimeJitter time.Duration TLSConfig *tls.Config @@ -61,13 +114,40 @@ type UniversalOptions struct { RouteByLatency bool RouteRandomly bool - // The sentinel master name. - // Only failover clients. - + // MasterName is the sentinel master name. + // Only for failover clients. MasterName string + // DisableIndentity - Disable set-lib on connect. + // + // default: false + // + // Deprecated: Use DisableIdentity instead. DisableIndentity bool - IdentitySuffix string + + // DisableIdentity is used to disable CLIENT SETINFO command on connect. + // + // default: false + DisableIdentity bool + + IdentitySuffix string + + // FailingTimeoutSeconds is the timeout in seconds for marking a cluster node as failing. + // When a node is marked as failing, it will be avoided for this duration. + // Only applies to cluster clients. Default is 15 seconds. + FailingTimeoutSeconds int + + UnstableResp3 bool + + // PushNotificationProcessor is the processor for handling push notifications. + // If nil, a default processor will be created for RESP3 connections. + PushNotificationProcessor push.NotificationProcessor + + // IsClusterMode can be used when only one Addrs is provided (e.g. 
Elasticache supports setting up cluster mode with configuration endpoint). + IsClusterMode bool + + // MaintNotificationsConfig provides configuration for maintnotifications upgrades. + MaintNotificationsConfig *maintnotifications.Config } // Cluster returns cluster options created from the universal options. @@ -82,9 +162,12 @@ func (o *UniversalOptions) Cluster() *ClusterOptions { Dialer: o.Dialer, OnConnect: o.OnConnect, - Protocol: o.Protocol, - Username: o.Username, - Password: o.Password, + Protocol: o.Protocol, + Username: o.Username, + Password: o.Password, + CredentialsProvider: o.CredentialsProvider, + CredentialsProviderContext: o.CredentialsProviderContext, + StreamingCredentialsProvider: o.StreamingCredentialsProvider, MaxRedirects: o.MaxRedirects, ReadOnly: o.ReadOnly, @@ -95,25 +178,37 @@ func (o *UniversalOptions) Cluster() *ClusterOptions { MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, - DialTimeout: o.DialTimeout, - ReadTimeout: o.ReadTimeout, - WriteTimeout: o.WriteTimeout, + DialTimeout: o.DialTimeout, + DialerRetries: o.DialerRetries, + DialerRetryTimeout: o.DialerRetryTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + ContextTimeoutEnabled: o.ContextTimeoutEnabled, - PoolFIFO: o.PoolFIFO, + ReadBufferSize: o.ReadBufferSize, + WriteBufferSize: o.WriteBufferSize, - PoolSize: o.PoolSize, - PoolTimeout: o.PoolTimeout, - MinIdleConns: o.MinIdleConns, - MaxIdleConns: o.MaxIdleConns, - MaxActiveConns: o.MaxActiveConns, - ConnMaxIdleTime: o.ConnMaxIdleTime, - ConnMaxLifetime: o.ConnMaxLifetime, + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MaxConcurrentDials: o.MaxConcurrentDials, + PoolTimeout: o.PoolTimeout, + MinIdleConns: o.MinIdleConns, + MaxIdleConns: o.MaxIdleConns, + MaxActiveConns: o.MaxActiveConns, + ConnMaxIdleTime: o.ConnMaxIdleTime, + ConnMaxLifetime: o.ConnMaxLifetime, + ConnMaxLifetimeJitter: o.ConnMaxLifetimeJitter, TLSConfig: o.TLSConfig, - DisableIndentity: o.DisableIndentity, - 
IdentitySuffix: o.IdentitySuffix, + DisableIdentity: o.DisableIdentity, + DisableIndentity: o.DisableIndentity, + IdentitySuffix: o.IdentitySuffix, + FailingTimeoutSeconds: o.FailingTimeoutSeconds, + UnstableResp3: o.UnstableResp3, + PushNotificationProcessor: o.PushNotificationProcessor, + MaintNotificationsConfig: o.MaintNotificationsConfig, } } @@ -131,35 +226,56 @@ func (o *UniversalOptions) Failover() *FailoverOptions { Dialer: o.Dialer, OnConnect: o.OnConnect, - DB: o.DB, - Protocol: o.Protocol, - Username: o.Username, - Password: o.Password, + DB: o.DB, + Protocol: o.Protocol, + Username: o.Username, + Password: o.Password, + CredentialsProvider: o.CredentialsProvider, + CredentialsProviderContext: o.CredentialsProviderContext, + StreamingCredentialsProvider: o.StreamingCredentialsProvider, + SentinelUsername: o.SentinelUsername, SentinelPassword: o.SentinelPassword, + RouteByLatency: o.RouteByLatency, + RouteRandomly: o.RouteRandomly, + MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, - DialTimeout: o.DialTimeout, - ReadTimeout: o.ReadTimeout, - WriteTimeout: o.WriteTimeout, + DialTimeout: o.DialTimeout, + DialerRetries: o.DialerRetries, + DialerRetryTimeout: o.DialerRetryTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + ContextTimeoutEnabled: o.ContextTimeoutEnabled, - PoolFIFO: o.PoolFIFO, - PoolSize: o.PoolSize, - PoolTimeout: o.PoolTimeout, - MinIdleConns: o.MinIdleConns, - MaxIdleConns: o.MaxIdleConns, - MaxActiveConns: o.MaxActiveConns, - ConnMaxIdleTime: o.ConnMaxIdleTime, - ConnMaxLifetime: o.ConnMaxLifetime, + ReadBufferSize: o.ReadBufferSize, + WriteBufferSize: o.WriteBufferSize, + + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MaxConcurrentDials: o.MaxConcurrentDials, + PoolTimeout: o.PoolTimeout, + MinIdleConns: o.MinIdleConns, + MaxIdleConns: o.MaxIdleConns, + MaxActiveConns: o.MaxActiveConns, + ConnMaxIdleTime: o.ConnMaxIdleTime, + ConnMaxLifetime: o.ConnMaxLifetime, + 
ConnMaxLifetimeJitter: o.ConnMaxLifetimeJitter, TLSConfig: o.TLSConfig, - DisableIndentity: o.DisableIndentity, - IdentitySuffix: o.IdentitySuffix, + ReplicaOnly: o.ReadOnly, + + DisableIdentity: o.DisableIdentity, + DisableIndentity: o.DisableIndentity, + IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, + PushNotificationProcessor: o.PushNotificationProcessor, + // Note: MaintNotificationsConfig not supported for FailoverOptions } } @@ -176,33 +292,48 @@ func (o *UniversalOptions) Simple() *Options { Dialer: o.Dialer, OnConnect: o.OnConnect, - DB: o.DB, - Protocol: o.Protocol, - Username: o.Username, - Password: o.Password, + DB: o.DB, + Protocol: o.Protocol, + Username: o.Username, + Password: o.Password, + CredentialsProvider: o.CredentialsProvider, + CredentialsProviderContext: o.CredentialsProviderContext, + StreamingCredentialsProvider: o.StreamingCredentialsProvider, MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, - DialTimeout: o.DialTimeout, - ReadTimeout: o.ReadTimeout, - WriteTimeout: o.WriteTimeout, + DialTimeout: o.DialTimeout, + DialerRetries: o.DialerRetries, + DialerRetryTimeout: o.DialerRetryTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + ContextTimeoutEnabled: o.ContextTimeoutEnabled, - PoolFIFO: o.PoolFIFO, - PoolSize: o.PoolSize, - PoolTimeout: o.PoolTimeout, - MinIdleConns: o.MinIdleConns, - MaxIdleConns: o.MaxIdleConns, - MaxActiveConns: o.MaxActiveConns, - ConnMaxIdleTime: o.ConnMaxIdleTime, - ConnMaxLifetime: o.ConnMaxLifetime, + ReadBufferSize: o.ReadBufferSize, + WriteBufferSize: o.WriteBufferSize, + + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MaxConcurrentDials: o.MaxConcurrentDials, + PoolTimeout: o.PoolTimeout, + MinIdleConns: o.MinIdleConns, + MaxIdleConns: o.MaxIdleConns, + MaxActiveConns: o.MaxActiveConns, + ConnMaxIdleTime: o.ConnMaxIdleTime, + ConnMaxLifetime: o.ConnMaxLifetime, + ConnMaxLifetimeJitter: o.ConnMaxLifetimeJitter, 
TLSConfig: o.TLSConfig, - DisableIndentity: o.DisableIndentity, - IdentitySuffix: o.IdentitySuffix, + DisableIdentity: o.DisableIdentity, + DisableIndentity: o.DisableIndentity, + IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, + PushNotificationProcessor: o.PushNotificationProcessor, + MaintNotificationsConfig: o.MaintNotificationsConfig, } } @@ -234,14 +365,26 @@ var ( // NewUniversalClient returns a new multi client. The type of the returned client depends // on the following conditions: // -// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned. -// 2. if the number of Addrs is two or more, a ClusterClient is returned. -// 3. Otherwise, a single-node Client is returned. +// 1. If the MasterName option is specified with RouteByLatency, RouteRandomly or IsClusterMode, +// a FailoverClusterClient is returned. +// 2. If the MasterName option is specified without RouteByLatency, RouteRandomly or IsClusterMode, +// a sentinel-backed FailoverClient is returned. +// 3. If the number of Addrs is two or more, or IsClusterMode option is specified, +// a ClusterClient is returned. +// 4. Otherwise, a single-node Client is returned. 
func NewUniversalClient(opts *UniversalOptions) UniversalClient { - if opts.MasterName != "" { - return NewFailoverClient(opts.Failover()) - } else if len(opts.Addrs) > 1 { - return NewClusterClient(opts.Cluster()) + if opts == nil { + panic("redis: NewUniversalClient nil options") + } + + switch { + case opts.MasterName != "" && (opts.RouteByLatency || opts.RouteRandomly || opts.IsClusterMode): + return NewFailoverClusterClient(opts.Failover()) + case opts.MasterName != "": + return NewFailoverClient(opts.Failover()) + case len(opts.Addrs) > 1 || opts.IsClusterMode: + return NewClusterClient(opts.Cluster()) + default: + return NewClient(opts.Simple()) } - return NewClient(opts.Simple()) } diff --git a/vendor/github.com/redis/go-redis/v9/vectorset_commands.go b/vendor/github.com/redis/go-redis/v9/vectorset_commands.go new file mode 100644 index 00000000..8f99de07 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/vectorset_commands.go @@ -0,0 +1,358 @@ +package redis + +import ( + "context" + "encoding/json" + "strconv" +) + +// note: the APIs is experimental and may be subject to change. 
+type VectorSetCmdable interface { + VAdd(ctx context.Context, key, element string, val Vector) *BoolCmd + VAddWithArgs(ctx context.Context, key, element string, val Vector, addArgs *VAddArgs) *BoolCmd + VCard(ctx context.Context, key string) *IntCmd + VDim(ctx context.Context, key string) *IntCmd + VEmb(ctx context.Context, key, element string, raw bool) *SliceCmd + VGetAttr(ctx context.Context, key, element string) *StringCmd + VInfo(ctx context.Context, key string) *MapStringInterfaceCmd + VLinks(ctx context.Context, key, element string) *StringSliceCmd + VLinksWithScores(ctx context.Context, key, element string) *VectorScoreSliceCmd + VRandMember(ctx context.Context, key string) *StringCmd + VRandMemberCount(ctx context.Context, key string, count int) *StringSliceCmd + VRem(ctx context.Context, key, element string) *BoolCmd + VSetAttr(ctx context.Context, key, element string, attr interface{}) *BoolCmd + VClearAttributes(ctx context.Context, key, element string) *BoolCmd + VSim(ctx context.Context, key string, val Vector) *StringSliceCmd + VSimWithScores(ctx context.Context, key string, val Vector) *VectorScoreSliceCmd + VSimWithArgs(ctx context.Context, key string, val Vector, args *VSimArgs) *StringSliceCmd + VSimWithArgsWithScores(ctx context.Context, key string, val Vector, args *VSimArgs) *VectorScoreSliceCmd + VRange(ctx context.Context, key, start, end string, count int64) *StringSliceCmd +} + +type Vector interface { + Value() []any +} + +const ( + vectorFormatFP32 string = "FP32" + vectorFormatValues string = "Values" +) + +type VectorFP32 struct { + Val []byte +} + +func (v *VectorFP32) Value() []any { + return []any{vectorFormatFP32, v.Val} +} + +var _ Vector = (*VectorFP32)(nil) + +type VectorValues struct { + Val []float64 +} + +func (v *VectorValues) Value() []any { + res := make([]any, 2+len(v.Val)) + res[0] = vectorFormatValues + res[1] = len(v.Val) + for i, v := range v.Val { + res[2+i] = v + } + return res +} + +var _ Vector = 
(*VectorValues)(nil) + +type VectorRef struct { + Name string // the name of the referent vector +} + +func (v *VectorRef) Value() []any { + return []any{"ele", v.Name} +} + +var _ Vector = (*VectorRef)(nil) + +type VectorScore struct { + Name string + Score float64 +} + +// `VADD key (FP32 | VALUES num) vector element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VAdd(ctx context.Context, key, element string, val Vector) *BoolCmd { + return c.VAddWithArgs(ctx, key, element, val, &VAddArgs{}) +} + +type VAddArgs struct { + // the REDUCE option must be passed immediately after the key + Reduce int64 + Cas bool + + // The NoQuant, Q8 and Bin options are mutually exclusive. + NoQuant bool + Q8 bool + Bin bool + + EF int64 + SetAttr string + M int64 +} + +func (v VAddArgs) reduce() int64 { + return v.Reduce +} + +func (v VAddArgs) appendArgs(args []any) []any { + if v.Cas { + args = append(args, "cas") + } + + if v.NoQuant { + args = append(args, "noquant") + } else if v.Q8 { + args = append(args, "q8") + } else if v.Bin { + args = append(args, "bin") + } + + if v.EF > 0 { + args = append(args, "ef", strconv.FormatInt(v.EF, 10)) + } + if len(v.SetAttr) > 0 { + args = append(args, "setattr", v.SetAttr) + } + if v.M > 0 { + args = append(args, "m", strconv.FormatInt(v.M, 10)) + } + return args +} + +// `VADD key [REDUCE dim] (FP32 | VALUES num) vector element [CAS] [NOQUANT | Q8 | BIN] [EF build-exploration-factor] [SETATTR attributes] [M numlinks]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VAddWithArgs(ctx context.Context, key, element string, val Vector, addArgs *VAddArgs) *BoolCmd { + if addArgs == nil { + addArgs = &VAddArgs{} + } + args := []any{"vadd", key} + if addArgs.reduce() > 0 { + args = append(args, "reduce", addArgs.reduce()) + } + args = append(args, val.Value()...) + args = append(args, element) + args = addArgs.appendArgs(args) + cmd := NewBoolCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// `VCARD key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "vcard", key) + _ = c(ctx, cmd) + return cmd +} + +// `VDIM key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VDim(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "vdim", key) + _ = c(ctx, cmd) + return cmd +} + +// `VEMB key element [RAW]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VEmb(ctx context.Context, key, element string, raw bool) *SliceCmd { + args := []any{"vemb", key, element} + if raw { + args = append(args, "raw") + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VGETATTR key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VGetAttr(ctx context.Context, key, element string) *StringCmd { + cmd := NewStringCmd(ctx, "vgetattr", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VINFO key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VInfo(ctx context.Context, key string) *MapStringInterfaceCmd { + cmd := NewMapStringInterfaceCmd(ctx, "vinfo", key) + _ = c(ctx, cmd) + return cmd +} + +// `VLINKS key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VLinks(ctx context.Context, key, element string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "vlinks", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VLINKS key element WITHSCORES` +// note: the API is experimental and may be subject to change. +func (c cmdable) VLinksWithScores(ctx context.Context, key, element string) *VectorScoreSliceCmd { + cmd := NewVectorInfoSliceCmd(ctx, "vlinks", key, element, "withscores") + _ = c(ctx, cmd) + return cmd +} + +// `VRANDMEMBER key` +// note: the API is experimental and may be subject to change. 
+func (c cmdable) VRandMember(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "vrandmember", key) + _ = c(ctx, cmd) + return cmd +} + +// `VRANDMEMBER key [count]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VRandMemberCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "vrandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +// `VREM key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VRem(ctx context.Context, key, element string) *BoolCmd { + cmd := NewBoolCmd(ctx, "vrem", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VSETATTR key element "{ JSON obj }"` +// The `attr` must be something that can be marshaled to JSON (using encoding/JSON) unless +// the argument is a string or []byte when we assume that it can be passed directly as JSON. +// +// note: the API is experimental and may be subject to change. +func (c cmdable) VSetAttr(ctx context.Context, key, element string, attr interface{}) *BoolCmd { + var attrStr string + var err error + switch v := attr.(type) { + case string: + attrStr = v + case []byte: + attrStr = string(v) + default: + var bytes []byte + bytes, err = json.Marshal(v) + if err != nil { + // If marshalling fails, create the command and set the error; this command won't be executed. + cmd := NewBoolCmd(ctx, "vsetattr", key, element, "") + cmd.SetErr(err) + return cmd + } + attrStr = string(bytes) + } + cmd := NewBoolCmd(ctx, "vsetattr", key, element, attrStr) + _ = c(ctx, cmd) + return cmd +} + +// `VClearAttributes` clear attributes on a vector set element. +// The implementation of `VClearAttributes` is execute command `VSETATTR key element ""`. +// note: the API is experimental and may be subject to change. 
+func (c cmdable) VClearAttributes(ctx context.Context, key, element string) *BoolCmd { + cmd := NewBoolCmd(ctx, "vsetattr", key, element, "") + _ = c(ctx, cmd) + return cmd +} + +// `VSIM key (ELE | FP32 | VALUES num) (vector | element)` +// note: the API is experimental and may be subject to change. +func (c cmdable) VSim(ctx context.Context, key string, val Vector) *StringSliceCmd { + return c.VSimWithArgs(ctx, key, val, &VSimArgs{}) +} + +// `VSIM key (ELE | FP32 | VALUES num) (vector | element) WITHSCORES` +// note: the API is experimental and may be subject to change. +func (c cmdable) VSimWithScores(ctx context.Context, key string, val Vector) *VectorScoreSliceCmd { + return c.VSimWithArgsWithScores(ctx, key, val, &VSimArgs{}) +} + +type VSimArgs struct { + Count int64 + EF int64 + Filter string + FilterEF int64 + Truth bool + NoThread bool + Epsilon float64 +} + +func (v VSimArgs) appendArgs(args []any) []any { + if v.Count > 0 { + args = append(args, "count", v.Count) + } + if v.EF > 0 { + args = append(args, "ef", v.EF) + } + if len(v.Filter) > 0 { + args = append(args, "filter", v.Filter) + } + if v.FilterEF > 0 { + args = append(args, "filter-ef", v.FilterEF) + } + if v.Truth { + args = append(args, "truth") + } + if v.NoThread { + args = append(args, "nothread") + } + if v.Epsilon > 0 { + args = append(args, "Epsilon", v.Epsilon) + } + return args +} + +// `VSIM key (ELE | FP32 | VALUES num) (vector | element) [COUNT num] [EPSILON delta] +// [EF search-exploration-factor] [FILTER expression] [FILTER-EF max-filtering-effort] [TRUTH] [NOTHREAD]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VSimWithArgs(ctx context.Context, key string, val Vector, simArgs *VSimArgs) *StringSliceCmd { + if simArgs == nil { + simArgs = &VSimArgs{} + } + args := []any{"vsim", key} + args = append(args, val.Value()...) + args = simArgs.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// `VSIM key (ELE | FP32 | VALUES num) (vector | element) [WITHSCORES] [COUNT num] [EPSILON delta] +// [EF search-exploration-factor] [FILTER expression] [FILTER-EF max-filtering-effort] [TRUTH] [NOTHREAD]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VSimWithArgsWithScores(ctx context.Context, key string, val Vector, simArgs *VSimArgs) *VectorScoreSliceCmd { + if simArgs == nil { + simArgs = &VSimArgs{} + } + args := []any{"vsim", key} + args = append(args, val.Value()...) + args = append(args, "withscores") + args = simArgs.appendArgs(args) + cmd := NewVectorInfoSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VRANGE key start end count` +// a negative count means to return all the elements in the vector set. +// note: the API is experimental and may be subject to change. +func (c cmdable) VRange(ctx context.Context, key, start, end string, count int64) *StringSliceCmd { + args := []any{"vrange", key, start, end, count} + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go index 92f49820..49f001e5 100644 --- a/vendor/github.com/redis/go-redis/v9/version.go +++ b/vendor/github.com/redis/go-redis/v9/version.go @@ -2,5 +2,5 @@ package redis // Version is the current release version. 
func Version() string { - return "9.4.0" + return "9.18.0" } diff --git a/vendor/github.com/rubenv/sql-migrate/.golangci.yaml b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml index 40d1720e..f5818416 100644 --- a/vendor/github.com/rubenv/sql-migrate/.golangci.yaml +++ b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml @@ -1,98 +1,133 @@ -linters-settings: - gocritic: - disabled-checks: - - ifElseChain - goimports: - local-prefixes: github.com/rubenv/sql-migrate - govet: - enable-all: true - disable: - - fieldalignment - depguard: - list-type: blacklist - include-go-root: true - include-go-std-lib: true - exhaustive: - default-signifies-exhaustive: true - nolintlint: - allow-unused: false - allow-leading-space: false - allow-no-explanation: - - depguard - require-explanation: true - require-specific: true - revive: - enable-all-rules: false - rules: - - name: atomic - - name: blank-imports - - name: bool-literal-in-expr - - name: call-to-gc - - name: constant-logical-expr - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: duplicated-imports - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - - name: identical-branches - - name: imports-blacklist - - name: increment-decrement - - name: indent-error-flow - - name: modifies-parameter - - name: modifies-value-receiver - - name: package-comments - - name: range - - name: range-val-address - - name: range-val-in-closure - - name: receiver-naming - - name: string-format - - name: string-of-int - - name: struct-tag - - name: time-naming - - name: unconditional-recursion - - name: unexported-naming - - name: unexported-return - - name: superfluous-else - - name: unreachable-code - - name: var-declaration - - name: waitgroup-by-value - - name: unused-receiver - - name: unnecessary-stmt - - name: unused-parameter +version: "2" run: tests: true - timeout: 1m linters: - disable-all: 
true + default: none enable: - asciicheck - depguard - errcheck + - errorlint - exhaustive - gocritic - - gofmt - - gofumpt - - goimports - govet - ineffassign - nolintlint - revive - staticcheck - - typecheck + - unparam - unused - whitespace - - errorlint - - gosimple - - unparam + settings: + depguard: + rules: + main: + allow: + - $gostd + - github.com/denisenkom/go-mssqldb + - github.com/go-sql-driver/mysql + - github.com/go-gorp/gorp/v3 + - github.com/lib/pq + - github.com/mattn/go-sqlite3 + - github.com/mitchellh/cli + - github.com/olekukonko/tablewriter + - github.com/rubenv/sql-migrate + - gopkg.in/check.v1 + - gopkg.in/yaml.v2 + exhaustive: + default-signifies-exhaustive: true + gocritic: + disabled-checks: + - ifElseChain + govet: + disable: + - fieldalignment + enable-all: true + nolintlint: + require-explanation: true + require-specific: true + allow-no-explanation: + - depguard + allow-unused: false + revive: + enable-all-rules: false + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: duplicated-imports + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: imports-blocklist + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: string-format + - name: string-of-int + - name: struct-tag + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: superfluous-else + - name: unreachable-code + - name: var-declaration + - name: waitgroup-by-value + - name: unused-receiver + - name: 
unnecessary-stmt + - name: unused-parameter + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: (.+)\.go$ + text: declaration of "err" shadows declaration at + - path: (.+)\.go$ + text: 'error-strings: error strings should not be capitalized or end with punctuation or a newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not end with punctuation or newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not be capitalized' + paths: + - third_party$ + - builtin$ + - examples$ issues: - exclude: - - 'declaration of "err" shadows declaration at' # Allow shadowing of `err` because it's so common - - 'error-strings: error strings should not be capitalized or end with punctuation or a newline' - max-same-issues: 10000 max-issues-per-linter: 10000 + max-same-issues: 10000 +formatters: + enable: + - gofmt + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/rubenv/sql-migrate + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go index 7fb56f1a..d08e22b4 100644 --- a/vendor/github.com/rubenv/sql-migrate/migrate.go +++ b/vendor/github.com/rubenv/sql-migrate/migrate.go @@ -640,7 +640,7 @@ func (ms MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m Migrati } // Sort migrations that have been run by Id. 
- var existingMigrations []*Migration + existingMigrations := make([]*Migration, 0, len(migrationRecords)) for _, migrationRecord := range migrationRecords { existingMigrations = append(existingMigrations, &Migration{ Id: migrationRecord.Id, @@ -700,13 +700,14 @@ func (ms MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m Migrati toApplyCount = max } for _, v := range toApply[0:toApplyCount] { - if dir == Up { + switch dir { + case Up: result = append(result, &PlannedMigration{ Migration: v, Queries: v.Up, DisableTransaction: v.DisableTransactionUp, }) - } else if dir == Down { + case Down: result = append(result, &PlannedMigration{ Migration: v, Queries: v.Down, @@ -779,14 +780,13 @@ func ToApply(migrations []*Migration, current string, direction MigrationDirecti } } - if direction == Up { + switch direction { + case Up: return migrations[index+1:] - } else if direction == Down { + case Down: if index == -1 { return []*Migration{} } - - // Add in reverse order toApply := make([]*Migration, index+1) for i := 0; i < index+1; i++ { toApply[index-i] = migrations[i] diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad..ffb24e8e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. 
+type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) 
} // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
+ failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -443,10 +457,11 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -459,17 +474,17 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) 
} return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109..c592f6ad 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -104,8 +113,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -186,7 +193,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. 
+// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,8 +587,24 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT [Empty]. 
// // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -604,7 +639,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -667,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -756,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd..58db9284 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) 
} -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -186,8 +204,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. 
// // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +215,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -336,7 +350,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +375,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. 
+// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,8 +1166,41 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1141,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1200,7 +1270,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) 
} -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1297,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1326,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). 
+// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1339,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1504,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). 
+// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1516,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0..2fdf80fd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. 
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
+ return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f2..de8de0cb 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -204,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. 
+ offset := 1 + + for { + n := runtime.Callers(offset, pcs) + + if n == 0 { break } - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + frames := runtime.CallersFrames(pcs[:n]) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { - break - } + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. 
+ if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break } } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } + // Next batch + offset += cap(pcs) } return callers @@ -431,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. 
+// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. @@ -469,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -496,10 +536,17 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -516,29 +563,37 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + // fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) 
} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +627,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -590,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -615,21 +669,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) 
} - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -660,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -710,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } + return isEmptyValue(reflect.ValueOf(object)) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. 
+func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty + switch objValue.Kind() { + // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. + case reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // non-nil pointers are empty if the value they point to is empty + case reflect.Ptr: + return isEmptyValue(objValue.Elem()) + } + return false +} + +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -751,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -770,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -814,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -829,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. 
@@ -852,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -875,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -910,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -933,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -956,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -978,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1002,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1016,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1036,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1060,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1170,6 +1227,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) + } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1578,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1550,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1611,7 +1702,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,8 +1709,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } // Regexp asserts that a specified regexp matches a string. @@ -1656,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1767,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1785,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) 
} + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1872,7 +1977,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1886,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1893,35 +1999,47 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. 
+func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +2052,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2079,22 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) + + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1966,29 +2102,28 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. 
+ go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect.errors - }() - condition(collect) - }() - case errs := <-ch: - if len(errs) == 0 { + case <-tickC: + tickC = nil + go checkCond() + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs - tick = ticker.C + lastFinishedTickErrs = collect.errors + tickC = ticker.C } } } @@ -2003,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2010,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2039,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) 
+ } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2049,7 +2193,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2064,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2082,24 +2226,70 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. 
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err, true) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, + ), msgAndArgs...) +} + +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) + } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d..a0b953aa 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. 
+// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7..5a6bb75f 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000..5a74c4f4 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,24 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. 
+// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... +// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 00000000..0bae80e3 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,36 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. 
+// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 00000000..8041803f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,17 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. 
+// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index 213bde2e..efc89def 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -80,12 +80,12 @@ type Call struct { requires []*Call } -func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments Arguments, returnArguments Arguments) *Call { return &Call{ Parent: parent, Method: methodName, Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), + ReturnArguments: returnArguments, callerInfo: callerInfo, Repeatability: 0, WaitFor: nil, @@ -208,9 +208,16 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call { return c.Parent.On(methodName, arguments...) } -// Unset removes a mock handler from being called. +// Unset removes all mock handlers that satisfy the call instance arguments from being +// called. Only supported on call instances with static input arguments. // -// test.On("func", mock.Anything).Unset() +// For example, the only handler remaining after the following would be "MyMethod(2, 2)": +// +// Mock. +// On("MyMethod", 2, 2).Return(0). +// On("MyMethod", 3, 3).Return(0). +// On("MyMethod", Anything, Anything).Return(0) +// Mock.On("MyMethod", 3, 3).Unset() func (c *Call) Unset() *Call { var unlockOnce sync.Once @@ -256,7 +263,7 @@ func (c *Call) Unset() *Call { // calls have been called as expected. 
The referenced calls may be from the // same mock instance and/or other mock instances. // -// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Do").Return(nil).NotBefore( // Mock.On("Init").Return(nil) // ) func (c *Call) NotBefore(calls ...*Call) *Call { @@ -273,6 +280,20 @@ func (c *Call) NotBefore(calls ...*Call) *Call { return c } +// InOrder defines the order in which the calls should be made +// +// For example: +// +// InOrder( +// Mock.On("init").Return(nil), +// Mock.On("Do").Return(nil), +// ) +func InOrder(calls ...*Call) { + for i := 1; i < len(calls); i++ { + calls[i].NotBefore(calls[i-1]) + } +} + // Mock is the workhorse used to track activity on another object. // For an example of its usage, refer to the "Example Usage" section at the top // of this document. @@ -317,7 +338,10 @@ func (m *Mock) TestData() objx.Map { Setting expectations */ -// Test sets the test struct variable of the mock object +// Test sets the [TestingT] on which errors will be reported, otherwise errors +// will cause a panic. +// Test should not be called on an object that is going to be used in a +// goroutine other than the one running the test function. func (m *Mock) Test(t TestingT) { m.mutex.Lock() defer m.mutex.Unlock() @@ -351,7 +375,8 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call { m.mutex.Lock() defer m.mutex.Unlock() - c := newCall(m, methodName, assert.CallerInfo(), arguments...) 
+ + c := newCall(m, methodName, assert.CallerInfo(), arguments, make([]interface{}, 0)) m.ExpectedCalls = append(m.ExpectedCalls, c) return c } @@ -479,7 +504,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() - m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(%#v).Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) } // we have to fail here - because we don't know what to do // as the return arguments. This is because: @@ -491,14 +516,15 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen m.mutex.Unlock() if closestCall != nil { - m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s\nat: %s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments), strings.TrimSpace(mismatch), + assert.CallerInfo(), ) } else { - m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(%#v).Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) } } @@ -529,7 +555,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen call.totalCalls++ // add the call - m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments, call.ReturnArguments)) m.mutex.Unlock() // block if specified @@ -645,7 +671,7 @@ func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls actualCalls++ } } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) of method %s does not match the actual number of calls (%d).", expectedCalls, methodName, actualCalls)) } // AssertCalled asserts that the method was called. @@ -764,9 +790,17 @@ const ( ) // AnythingOfTypeArgument contains the type of an argument -// for use when type checking. Used in Diff and Assert. +// for use when type checking. Used in [Arguments.Diff] and [Arguments.Assert]. // -// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +// Deprecated: this is an implementation detail that must not be used. 
Use the [AnythingOfType] constructor instead, example: +// +// m.On("Do", mock.AnythingOfType("string")) +// +// All explicit type declarations can be replaced with interface{} as is expected by [Mock.On], example: +// +// func anyString interface{} { +// return mock.AnythingOfType("string") +// } type AnythingOfTypeArgument = anythingOfTypeArgument // anythingOfTypeArgument is a string that contains the type of an argument @@ -780,53 +814,54 @@ type anythingOfTypeArgument string // // For example: // -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +// args.Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument -// for use when type checking. This is an alternative to AnythingOfType. -// Used in Diff and Assert. +// for use when type checking. This is an alternative to [AnythingOfType]. +// Used in [Arguments.Diff] and [Arguments.Assert]. type IsTypeArgument struct { t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. // You can provide a zero-value of the type to check. This is an -// alternative to AnythingOfType. Used in Diff and Assert. +// alternative to [AnythingOfType]. Used in [Arguments.Diff] and [Arguments.Assert]. // // For example: -// Assert(t, IsType(""), IsType(0)) +// +// args.Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { return &IsTypeArgument{t: reflect.TypeOf(t)} } -// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument -// for use when type checking. +// FunctionalOptionsArgument contains a list of functional options arguments +// expected for use when matching a list of arguments. 
type FunctionalOptionsArgument struct { - value interface{} + values []interface{} } // String returns the string representation of FunctionalOptionsArgument func (f *FunctionalOptionsArgument) String() string { var name string - tValue := reflect.ValueOf(f.value) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + if len(f.values) > 0 { + name = "[]" + reflect.TypeOf(f.values[0]).String() } - return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1) + return strings.Replace(fmt.Sprintf("%#v", f.values), "[]interface {}", name, 1) } -// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type -// and the values to check of +// FunctionalOptions returns an [FunctionalOptionsArgument] object containing +// the expected functional-options to check for. // // For example: -// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2())) -func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument { +// +// args.Assert(t, FunctionalOptions(foo.Opt1("strValue"), foo.Opt2(613))) +func FunctionalOptions(values ...interface{}) *FunctionalOptionsArgument { return &FunctionalOptionsArgument{ - value: value, + values: values, } } @@ -873,10 +908,11 @@ func (f argumentMatcher) String() string { // and false otherwise. // // Example: -// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) // -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. If |fn| doesn't match the required signature, +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// fn must be a function accepting a single argument (of the expected type) +// which returns a bool. If fn doesn't match the required signature, // MatchedBy() panics. 
func MatchedBy(fn interface{}) argumentMatcher { fnType := reflect.TypeOf(fn) @@ -979,20 +1015,17 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) } case *FunctionalOptionsArgument: - t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + if len(expected.values) > 0 { + name = "[]" + reflect.TypeOf(expected.values[0]).String() } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + const tName = "[]interface{}" + if name != reflect.TypeOf(actual).String() && len(expected.values) != 0 { differences++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + if ef, af := assertOpts(expected.values, actual); ef == "" && af == "" { // match output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) } else { @@ -1092,7 +1125,7 @@ func (args Arguments) Error(index int) error { return nil } if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, obj)) } return s } @@ -1181,32 +1214,38 @@ type tHelper interface { func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { expectedOpts := reflect.ValueOf(expected) actualOpts := reflect.ValueOf(actual) + + var expectedFuncs []*runtime.Func var expectedNames []string for i := 0; i < expectedOpts.Len(); i++ { - expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface())) + f := runtimeFunc(expectedOpts.Index(i).Interface()) + expectedFuncs = 
append(expectedFuncs, f) + expectedNames = append(expectedNames, funcName(f)) } + var actualFuncs []*runtime.Func var actualNames []string for i := 0; i < actualOpts.Len(); i++ { - actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface())) + f := runtimeFunc(actualOpts.Index(i).Interface()) + actualFuncs = append(actualFuncs, f) + actualNames = append(actualNames, funcName(f)) } - if !assert.ObjectsAreEqual(expectedNames, actualNames) { + + if expectedOpts.Len() != actualOpts.Len() { expectedFmt = fmt.Sprintf("%v", expectedNames) actualFmt = fmt.Sprintf("%v", actualNames) return } for i := 0; i < expectedOpts.Len(); i++ { - expectedOpt := expectedOpts.Index(i).Interface() - actualOpt := actualOpts.Index(i).Interface() - - expectedFunc := expectedNames[i] - actualFunc := actualNames[i] - if expectedFunc != actualFunc { - expectedFmt = expectedFunc - actualFmt = actualFunc + if !isFuncSame(expectedFuncs[i], actualFuncs[i]) { + expectedFmt = expectedNames[i] + actualFmt = actualNames[i] return } + expectedOpt := expectedOpts.Index(i).Interface() + actualOpt := actualOpts.Index(i).Interface() + ot := reflect.TypeOf(expectedOpt) var expectedValues []reflect.Value var actualValues []reflect.Value @@ -1224,9 +1263,9 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { reflect.ValueOf(actualOpt).Call(actualValues) for i := 0; i < ot.NumIn(); i++ { - if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) { - expectedFmt = fmt.Sprintf("%s %+v", expectedNames[i], expectedValues[i].Interface()) - actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface()) + if expectedArg, actualArg := expectedValues[i].Interface(), actualValues[i].Interface(); !assert.ObjectsAreEqual(expectedArg, actualArg) { + expectedFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], expectedArg, expectedArg) + actualFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], actualArg, actualArg) return } } @@ 
-1235,7 +1274,25 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { return "", "" } -func funcName(opt interface{}) string { - n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name() - return strings.TrimSuffix(path.Base(n), path.Ext(n)) +func runtimeFunc(opt interface{}) *runtime.Func { + return runtime.FuncForPC(reflect.ValueOf(opt).Pointer()) +} + +func funcName(f *runtime.Func) string { + name := f.Name() + trimmed := strings.TrimSuffix(path.Base(name), path.Ext(name)) + splitted := strings.Split(trimmed, ".") + + if len(splitted) == 0 { + return trimmed + } + + return splitted[len(splitted)-1] +} + +func isFuncSame(f1, f2 *runtime.Func) bool { + f1File, f1Loc := f1.FileLine(f1.Entry()) + f2File, f2Loc := f2.FileLine(f2.Entry()) + + return f1File == f2File && f1Loc == f2Loc } diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 96843472..c8e3f94a 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. 
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f8..2d02f9bc 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. 
If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". // -// assert.Empty(t, obj) +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". +// +// require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". +// +// require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +165,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +184,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +199,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. 
// // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +218,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +238,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +250,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. 
// -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +264,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +280,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -321,7 +337,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. 
// // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +352,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -390,7 +404,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +429,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +457,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") 
func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +474,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +487,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +500,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +557,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. 
// -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +570,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +607,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +622,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +638,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message 
%s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +654,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +670,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +686,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +702,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +718,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. 
// -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +733,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +748,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +763,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. 
// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +778,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +793,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +808,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. 
// -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +823,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +838,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +853,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. 
// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +866,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +879,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +936,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +993,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +1008,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1023,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, 
[]float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1038,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1053,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1068,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message 
%s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1083,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1098,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. 
+// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1165,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. 
// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1178,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1192,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1206,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. 
// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1219,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1234,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1250,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message 
%s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1266,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . // Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1281,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1295,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1310,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't 
satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1324,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1337,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1350,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. // -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1388,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). 
// // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1404,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1444,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1460,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. 
// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1473,50 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. 
If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + +// NotEmpty asserts that the specified object is NOT [Empty]. +// +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1445,11 +1528,10 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1545,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). 
@@ -1479,7 +1561,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1574,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1587,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1601,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1637,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1651,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1664,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1677,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1690,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1703,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1716,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1729,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1743,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1757,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. 
@@ -1667,7 +1773,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1681,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1697,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1849,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1864,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1879,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. 
// -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1893,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1907,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1920,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1933,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1947,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1961,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1975,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1989,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +2005,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1907,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1922,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2059,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. 
// -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2072,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2085,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2098,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2111,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). 
// -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2124,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42dde..8b328368 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a..e6f7e944 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -187,8 +205,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. 
// // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +216,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -337,7 +351,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +376,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. 
+// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,8 +1167,41 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1142,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1201,7 +1271,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) 
} -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1298,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1327,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). 
+// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1340,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1505,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). 
+// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1517,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfe..6b7ce929 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go index 8d55a3aa..05a562f7 100644 --- 
a/vendor/github.com/stretchr/testify/suite/doc.go +++ b/vendor/github.com/stretchr/testify/suite/doc.go @@ -5,6 +5,8 @@ // or individual tests (depending on which interface(s) you // implement). // +// The suite package does not support parallel tests. See [issue 934]. +// // A testing suite is usually built by first extending the built-in // suite functionality from suite.Suite in testify. Alternatively, // you could reproduce that logic on your own if you wanted (you @@ -63,4 +65,6 @@ // func TestExampleTestSuite(t *testing.T) { // suite.Run(t, new(ExampleTestSuite)) // } +// +// [issue 934]: https://github.com/stretchr/testify/issues/934 package suite diff --git a/vendor/github.com/stretchr/testify/suite/stats.go b/vendor/github.com/stretchr/testify/suite/stats.go index 261da37f..be4ccd67 100644 --- a/vendor/github.com/stretchr/testify/suite/stats.go +++ b/vendor/github.com/stretchr/testify/suite/stats.go @@ -16,26 +16,30 @@ type TestInformation struct { } func newSuiteInformation() *SuiteInformation { - testStats := make(map[string]*TestInformation) - return &SuiteInformation{ - TestStats: testStats, + TestStats: make(map[string]*TestInformation), } } -func (s SuiteInformation) start(testName string) { +func (s *SuiteInformation) start(testName string) { + if s == nil { + return + } s.TestStats[testName] = &TestInformation{ TestName: testName, Start: time.Now(), } } -func (s SuiteInformation) end(testName string, passed bool) { +func (s *SuiteInformation) end(testName string, passed bool) { + if s == nil { + return + } s.TestStats[testName].End = time.Now() s.TestStats[testName].Passed = passed } -func (s SuiteInformation) Passed() bool { +func (s *SuiteInformation) Passed() bool { for _, stats := range s.TestStats { if !stats.Passed { return false diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go index 18443a91..1b19be3b 100644 --- a/vendor/github.com/stretchr/testify/suite/suite.go +++ 
b/vendor/github.com/stretchr/testify/suite/suite.go @@ -7,6 +7,7 @@ import ( "reflect" "regexp" "runtime/debug" + "strings" "sync" "testing" "time" @@ -15,7 +16,6 @@ import ( "github.com/stretchr/testify/require" ) -var allTestsFilter = func(_, _ string) (bool, error) { return true, nil } var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") // Suite is a basic testing suite with methods for storing and @@ -116,6 +116,11 @@ func (suite *Suite) Run(name string, subtest func()) bool { }) } +type test = struct { + name string + run func(t *testing.T) +} + // Run takes a testing suite and runs all of the tests attached // to it. func Run(t *testing.T, suite TestingSuite) { @@ -124,45 +129,39 @@ func Run(t *testing.T, suite TestingSuite) { suite.SetT(t) suite.SetS(suite) - var suiteSetupDone bool - var stats *SuiteInformation if _, ok := suite.(WithStats); ok { stats = newSuiteInformation() } - tests := []testing.InternalTest{} + var tests []test methodFinder := reflect.TypeOf(suite) suiteName := methodFinder.Elem().Name() - for i := 0; i < methodFinder.NumMethod(); i++ { - method := methodFinder.Method(i) - - ok, err := methodFilter(method.Name) + var matchMethodRE *regexp.Regexp + if *matchMethod != "" { + var err error + matchMethodRE, err = regexp.Compile(*matchMethod) if err != nil { fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) os.Exit(1) } + } - if !ok { + for i := 0; i < methodFinder.NumMethod(); i++ { + method := methodFinder.Method(i) + + if !strings.HasPrefix(method.Name, "Test") { + continue + } + // Apply -testify.m filter + if matchMethodRE != nil && !matchMethodRE.MatchString(method.Name) { continue } - if !suiteSetupDone { - if stats != nil { - stats.Start = time.Now() - } - - if setupAllSuite, ok := suite.(SetupAllSuite); ok { - setupAllSuite.SetupSuite() - } - - suiteSetupDone = true - } - - test := testing.InternalTest{ - Name: method.Name, - F: func(t *testing.T) { + 
test := test{ + name: method.Name, + run: func(t *testing.T) { parentT := suite.T() suite.SetT(t) defer recoverAndFailOnPanic(t) @@ -171,10 +170,7 @@ func Run(t *testing.T, suite TestingSuite) { r := recover() - if stats != nil { - passed := !t.Failed() && r == nil - stats.end(method.Name, passed) - } + stats.end(method.Name, !t.Failed() && r == nil) if afterTestSuite, ok := suite.(AfterTest); ok { afterTestSuite.AfterTest(suiteName, method.Name) @@ -195,59 +191,47 @@ func Run(t *testing.T, suite TestingSuite) { beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) } - if stats != nil { - stats.start(method.Name) - } + stats.start(method.Name) method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) }, } tests = append(tests, test) } - if suiteSetupDone { - defer func() { - if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { - tearDownAllSuite.TearDownSuite() - } - if suiteWithStats, measureStats := suite.(WithStats); measureStats { - stats.End = time.Now() - suiteWithStats.HandleStats(suiteName, stats) - } - }() + if len(tests) == 0 { + return } + if stats != nil { + stats.Start = time.Now() + } + + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + + if suiteWithStats, measureStats := suite.(WithStats); measureStats { + stats.End = time.Now() + suiteWithStats.HandleStats(suiteName, stats) + } + }() + runTests(t, tests) } -// Filtering method according to set regular expression -// specified command-line argument -m -func methodFilter(name string) (bool, error) { - if ok, _ := regexp.MatchString("^Test", name); !ok { - return false, nil - } - return regexp.MatchString(*matchMethod, name) -} - -func runTests(t testing.TB, tests []testing.InternalTest) { +func runTests(t *testing.T, tests []test) { if len(tests) == 0 { t.Log("warning: no tests to run") return } - r, ok := t.(runner) - if !ok { 
// backwards compatibility with Go 1.6 and below - if !testing.RunTests(allTestsFilter, tests) { - t.Fail() - } - return - } - for _, test := range tests { - r.Run(test.Name, test.F) + t.Run(test.name, test.run) } } - -type runner interface { - Run(name string, f func(t *testing.T)) bool -} diff --git a/vendor/github.com/sv-tools/openapi/LICENSE b/vendor/github.com/sv-tools/openapi/LICENSE new file mode 100644 index 00000000..313c4582 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Tools + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sv-tools/openapi/spec/bool_or_schema.go b/vendor/github.com/sv-tools/openapi/spec/bool_or_schema.go new file mode 100644 index 00000000..1f96abfd --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/bool_or_schema.go @@ -0,0 +1,73 @@ +package spec + +import ( + "encoding/json" + + "gopkg.in/yaml.v3" +) + +// BoolOrSchema handles Boolean or Schema type. 
+// +// It MUST be used as a pointer, +// otherwise the `false` can be omitted by json or yaml encoders in case of `omitempty` tag is set. +type BoolOrSchema struct { + Schema *RefOrSpec[Schema] + Allowed bool +} + +// NewBoolOrSchema creates BoolOrSchema object. +func NewBoolOrSchema(allowed bool, spec *RefOrSpec[Schema]) *BoolOrSchema { + return &BoolOrSchema{ + Allowed: allowed, + Schema: spec, + } +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *BoolOrSchema) UnmarshalJSON(data []byte) error { + if json.Unmarshal(data, &o.Allowed) == nil { + o.Schema = nil + return nil + } + if err := json.Unmarshal(data, &o.Schema); err != nil { + return err + } + o.Allowed = true + return nil +} + +// MarshalJSON implements json.Marshaler interface. +func (o *BoolOrSchema) MarshalJSON() ([]byte, error) { + var v any + if o.Schema != nil { + v = o.Schema + } else { + v = o.Allowed + } + return json.Marshal(&v) +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. +func (o *BoolOrSchema) UnmarshalYAML(node *yaml.Node) error { + if node.Decode(&o.Allowed) == nil { + o.Schema = nil + return nil + } + if err := node.Decode(&o.Schema); err != nil { + return err + } + o.Allowed = true + return nil +} + +// MarshalYAML implements yaml.Marshaler interface. +func (o BoolOrSchema) MarshalYAML() (any, error) { + var v any + if o.Schema != nil { + v = o.Schema + } else { + v = o.Allowed + } + + return v, nil +} diff --git a/vendor/github.com/sv-tools/openapi/spec/callback.go b/vendor/github.com/sv-tools/openapi/spec/callback.go new file mode 100644 index 00000000..371e7db6 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/callback.go @@ -0,0 +1,68 @@ +package spec + +import ( + "encoding/json" + + "gopkg.in/yaml.v3" +) + +// Callback is a map of possible out-of band callbacks related to the parent operation. 
+// Each value in the map is a Path Item Object that describes a set of requests that may be initiated by +// the API provider and the expected responses. +// The key value used to identify the path item object is an expression, evaluated at runtime, +// that identifies a URL to use for the callback operation. +// To describe incoming requests from the API provider independent from another API call, use the webhooks field. +// +// https://spec.openapis.org/oas/v3.1.0#callback-object +// +// Example: +// +// myCallback: +// '{$request.query.queryUrl}': +// post: +// requestBody: +// description: Callback payload +// content: +// 'application/json': +// schema: +// $ref: '#/components/schemas/SomePayload' +// responses: +// '200': +// description: callback successfully processed +type Callback struct { + Callback map[string]*RefOrSpec[Extendable[PathItem]] +} + +// NewCallbackSpec creates Callback object. +func NewCallbackSpec() *RefOrSpec[Extendable[Callback]] { + o := make(map[string]*RefOrSpec[Extendable[PathItem]]) + spec := NewExtendable(&Callback{ + Callback: o, + }) + return NewRefOrSpec[Extendable[Callback]](nil, spec) +} + +// NewCallbackRef creates Ref object. +func NewCallbackRef(ref *Ref) *RefOrSpec[Extendable[Callback]] { + return NewRefOrSpec[Extendable[Callback]](ref, nil) +} + +// MarshalJSON implements json.Marshaler interface. +func (o *Callback) MarshalJSON() ([]byte, error) { + return json.Marshal(&o.Callback) +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *Callback) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &o.Callback) +} + +// MarshalYAML implements yaml.Marshaler interface. +func (o *Callback) MarshalYAML() (any, error) { + return o.Callback, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. 
+func (o *Callback) UnmarshalYAML(node *yaml.Node) error { + return node.Decode(&o.Callback) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/components.go b/vendor/github.com/sv-tools/openapi/spec/components.go new file mode 100644 index 00000000..8ab9a01c --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/components.go @@ -0,0 +1,161 @@ +package spec + +// Components holds a set of reusable objects for different aspects of the OAS. +// All objects defined within the components object will have no effect on the API unless they are explicitly referenced +// from properties outside the components object. +// +// https://spec.openapis.org/oas/v3.1.0#components-object +// +// Example: +// +// components: +// schemas: +// GeneralError: +// type: object +// properties: +// code: +// type: integer +// format: int32 +// message: +// type: string +// Category: +// type: object +// properties: +// id: +// type: integer +// format: int64 +// name: +// type: string +// Tag: +// type: object +// properties: +// id: +// type: integer +// format: int64 +// name: +// type: string +// parameters: +// skipParam: +// name: skip +// in: query +// description: number of items to skip +// required: true +// schema: +// type: integer +// format: int32 +// limitParam: +// name: limit +// in: query +// description: max records to return +// required: true +// schema: +// type: integer +// format: int32 +// responses: +// NotFound: +// description: Entity not found. +// IllegalInput: +// description: Illegal input for operation. 
+// GeneralError: +// description: General Error +// content: +// application/json: +// schema: +// $ref: '#/components/schemas/GeneralError' +// securitySchemes: +// api_key: +// type: apiKey +// name: api_key +// in: header +// petstore_auth: +// type: oauth2 +// flows: +// implicit: +// authorizationUrl: https://example.org/api/oauth/dialog +// scopes: +// write:pets: modify pets in your account +// read:pets: read your pets +type Components struct { + // An object to hold reusable Schema Objects. + Schemas map[string]*RefOrSpec[Schema] `json:"schemas,omitempty" yaml:"schemas,omitempty"` + // An object to hold reusable Response Objects. + Responses map[string]*RefOrSpec[Extendable[Response]] `json:"responses,omitempty" yaml:"responses,omitempty"` + // An object to hold reusable Parameter Objects. + Parameters map[string]*RefOrSpec[Extendable[Parameter]] `json:"parameters,omitempty" yaml:"parameters,omitempty"` + // An object to hold reusable Example Objects. + Examples map[string]*RefOrSpec[Extendable[Example]] `json:"examples,omitempty" yaml:"examples,omitempty"` + // An object to hold reusable Request Body Objects. + RequestBodies map[string]*RefOrSpec[Extendable[RequestBody]] `json:"requestBodies,omitempty" yaml:"requestBodies,omitempty"` + // An object to hold reusable Header Objects. + Headers map[string]*RefOrSpec[Extendable[Header]] `json:"headers,omitempty" yaml:"headers,omitempty"` + // An object to hold reusable Security Scheme Objects. + SecuritySchemes map[string]*RefOrSpec[Extendable[SecurityScheme]] `json:"securitySchemes,omitempty" yaml:"securitySchemes,omitempty"` + // An object to hold reusable Link Objects. + Links map[string]*RefOrSpec[Extendable[Link]] `json:"links,omitempty" yaml:"links,omitempty"` + // An object to hold reusable Callback Objects. + Callbacks map[string]*RefOrSpec[Extendable[Callback]] `json:"callbacks,omitempty" yaml:"callbacks,omitempty"` + // An object to hold reusable Path Item Object. 
+ Paths map[string]*RefOrSpec[Extendable[PathItem]] `json:"paths,omitempty" yaml:"paths,omitempty"` +} + +// NewComponents creates new Components object. +func NewComponents() *Extendable[Components] { + return NewExtendable(&Components{}) +} + +// WithRefOrSpec adds the given object to the appropriate list based on type and returns the current object (self|this). +func (o *Components) WithRefOrSpec(name string, v any) *Components { + switch spec := v.(type) { + case *RefOrSpec[Schema]: + if o.Schemas == nil { + o.Schemas = make(map[string]*RefOrSpec[Schema], 1) + } + o.Schemas[name] = spec + case *RefOrSpec[Extendable[Response]]: + if o.Responses == nil { + o.Responses = make(map[string]*RefOrSpec[Extendable[Response]], 1) + } + o.Responses[name] = spec + case *RefOrSpec[Extendable[Parameter]]: + if o.Parameters == nil { + o.Parameters = make(map[string]*RefOrSpec[Extendable[Parameter]], 1) + } + o.Parameters[name] = spec + case *RefOrSpec[Extendable[Example]]: + if o.Examples == nil { + o.Examples = make(map[string]*RefOrSpec[Extendable[Example]], 1) + } + o.Examples[name] = spec + case *RefOrSpec[Extendable[RequestBody]]: + if o.RequestBodies == nil { + o.RequestBodies = make(map[string]*RefOrSpec[Extendable[RequestBody]], 1) + } + o.RequestBodies[name] = spec + case *RefOrSpec[Extendable[Header]]: + if o.Headers == nil { + o.Headers = make(map[string]*RefOrSpec[Extendable[Header]], 1) + } + o.Headers[name] = spec + case *RefOrSpec[Extendable[SecurityScheme]]: + if o.SecuritySchemes == nil { + o.SecuritySchemes = make(map[string]*RefOrSpec[Extendable[SecurityScheme]], 1) + } + o.SecuritySchemes[name] = spec + case *RefOrSpec[Extendable[Link]]: + if o.Links == nil { + o.Links = make(map[string]*RefOrSpec[Extendable[Link]], 1) + } + o.Links[name] = spec + case *RefOrSpec[Extendable[Callback]]: + if o.Callbacks == nil { + o.Callbacks = make(map[string]*RefOrSpec[Extendable[Callback]], 1) + } + o.Callbacks[name] = spec + case *RefOrSpec[Extendable[PathItem]]: + if 
o.Paths == nil { + o.Paths = make(map[string]*RefOrSpec[Extendable[PathItem]], 1) + } + o.Paths[name] = spec + } + return o +} diff --git a/vendor/github.com/sv-tools/openapi/spec/contact.go b/vendor/github.com/sv-tools/openapi/spec/contact.go new file mode 100644 index 00000000..def40ada --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/contact.go @@ -0,0 +1,26 @@ +package spec + +// Contact information for the exposed API. +// +// https://spec.openapis.org/oas/v3.1.0#contact-object +// +// Example: +// +// name: API Support +// url: https://www.example.com/support +// email: support@example.com +type Contact struct { + // The identifying name of the contact person/organization. + Name string `json:"name,omitempty" yaml:"name,omitempty"` + // The URL pointing to the contact information. + // This MUST be in the form of a URL. + URL string `json:"url,omitempty" yaml:"url,omitempty"` + // The email address of the contact person/organization. + // This MUST be in the form of an email address. + Email string `json:"email,omitempty" yaml:"email,omitempty"` +} + +// NewContact creates Contact object. +func NewContact() *Extendable[Contact] { + return NewExtendable(&Contact{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/discriminator.go b/vendor/github.com/sv-tools/openapi/spec/discriminator.go new file mode 100644 index 00000000..33710f99 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/discriminator.go @@ -0,0 +1,35 @@ +package spec + +// Discriminator is used when request bodies or response payloads may be one of a number of different schemas, +// a discriminator object can be used to aid in serialization, deserialization, and validation. +// The discriminator is a specific object in a schema which is used to inform the consumer of the document of +// an alternative schema based on the value associated with it. +// When using the discriminator, inline schemas will not be considered. 
+// +// https://spec.openapis.org/oas/v3.1.0#discriminator-object +// +// Example: +// +// MyResponseType: +// oneOf: +// - $ref: '#/components/schemas/Cat' +// - $ref: '#/components/schemas/Dog' +// - $ref: '#/components/schemas/Lizard' +// - $ref: 'https://gigantic-server.com/schemas/Monster/schema.json' +// discriminator: +// propertyName: petType +// mapping: +// dog: '#/components/schemas/Dog' +// monster: 'https://gigantic-server.com/schemas/Monster/schema.json' +type Discriminator struct { + // An object to hold mappings between payload values and schema names or references. + Mapping map[string]string `json:"mapping,omitempty" yaml:"mapping,omitempty"` + // REQUIRED. + // The name of the property in the payload that will hold the discriminator value. + PropertyName string `json:"propertyName" yaml:"propertyName"` +} + +// NewDiscriminator creates Discriminator object. +func NewDiscriminator() *Discriminator { + return &Discriminator{} +} diff --git a/vendor/github.com/sv-tools/openapi/spec/encoding.go b/vendor/github.com/sv-tools/openapi/spec/encoding.go new file mode 100644 index 00000000..c74c4c3e --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/encoding.go @@ -0,0 +1,80 @@ +package spec + +// Encoding is definition that applied to a single schema property. +// +// https://spec.openapis.org/oas/v3.1.0#encoding-object +// +// Example: +// +// requestBody: +// content: +// multipart/form-data: +// schema: +// type: object +// properties: +// id: +// # default is text/plain +// type: string +// format: uuid +// address: +// # default is application/json +// type: object +// properties: {} +// historyMetadata: +// # need to declare XML format! 
+// description: metadata in XML format +// type: object +// properties: {} +// profileImage: {} +// encoding: +// historyMetadata: +// # require XML Content-Type in utf-8 encoding +// contentType: application/xml; charset=utf-8 +// profileImage: +// # only accept png/jpeg +// contentType: image/png, image/jpeg +// headers: +// X-Rate-Limit-Limit: +// description: The number of allowed requests in the current period +// schema: +// type: integer +type Encoding struct { + // The Content-Type for encoding a specific property. + // Default value depends on the property type: + // for object - application/json; + // for array – the default is defined based on the inner type; + // for all other cases the default is application/octet-stream. + // The value can be a specific media type (e.g. application/json), a wildcard media type (e.g. image/*), + // or a comma-separated list of the two types. + ContentType string `json:"contentType,omitempty" yaml:"contentType,omitempty"` + // A map allowing additional information to be provided as headers, for example Content-Disposition. + // Content-Type is described separately and SHALL be ignored in this section. + // This property SHALL be ignored if the request body media type is not a multipart. + Headers map[string]*RefOrSpec[Extendable[Header]] `json:"headers,omitempty" yaml:"headers,omitempty"` + // Describes how a specific property value will be serialized depending on its type. + // See Parameter Object for details on the style property. + // The behavior follows the same values as query parameters, including default values. + // This property SHALL be ignored if the request body media type is not application/x-www-form-urlencoded or multipart/form-data. + // If a value is explicitly defined, then the value of contentType (implicit or explicit) SHALL be ignored. 
+ Style string `json:"style,omitempty" yaml:"style,omitempty"` + // When this is true, property values of type array or object generate separate parameters for each value of the array, + // or key-value-pair of the map. + // For other types of properties this property has no effect. + // When style is form, the default value is true. + // For all other styles, the default value is false. + // This property SHALL be ignored if the request body media type is not application/x-www-form-urlencoded or multipart/form-data. + // If a value is explicitly defined, then the value of contentType (implicit or explicit) SHALL be ignored. + Explode bool `json:"explode,omitempty" yaml:"explode,omitempty"` + // Determines whether the parameter value SHOULD allow reserved characters, as defined by [RFC3986] + // :/?#[]@!$&'()*+,;= + // to be included without percent-encoding. + // The default value is false. + // This property SHALL be ignored if the request body media type is not application/x-www-form-urlencoded or multipart/form-data. + // If a value is explicitly defined, then the value of contentType (implicit or explicit) SHALL be ignored. + AllowReserved bool `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` +} + +// NewEncoding creates Encoding object. +func NewEncoding() *Extendable[Encoding] { + return NewExtendable(&Encoding{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/example.go b/vendor/github.com/sv-tools/openapi/spec/example.go new file mode 100644 index 00000000..0894e077 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/example.go @@ -0,0 +1,58 @@ +package spec + +// Example is expected to be compatible with the type schema of its associated value. +// Tooling implementations MAY choose to validate compatibility automatically, and reject the example value(s) if incompatible. 
+// +// https://spec.openapis.org/oas/v3.1.0#example-object +// +// Example: +// +// requestBody: +// content: +// 'application/json': +// schema: +// $ref: '#/components/schemas/Address' +// examples: +// foo: +// summary: A foo example +// value: {"foo": "bar"} +// bar: +// summary: A bar example +// value: {"bar": "baz"} +// 'application/xml': +// examples: +// xmlExample: +// summary: This is an example in XML +// externalValue: 'https://example.org/examples/address-example.xml' +// 'text/plain': +// examples: +// textExample: +// summary: This is a text example +// externalValue: 'https://foo.bar/examples/address-example.txt' +type Example struct { + // Short description for the example. + Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + // Long description for the example. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // Embedded literal example. + // The value field and externalValue field are mutually exclusive. + // To represent examples of media types that cannot naturally represented in JSON or YAML, + // use a string value to contain the example, escaping where necessary. + Value any `json:"value,omitempty" yaml:"value,omitempty"` + // A URI that points to the literal example. + // This provides the capability to reference examples that cannot easily be included in JSON or YAML documents. + // The value field and externalValue field are mutually exclusive. + // See the rules for resolving Relative References. + ExternalValue string `json:"externalValue,omitempty" yaml:"externalValue,omitempty"` +} + +// NewExampleSpec creates Example object. +func NewExampleSpec() *RefOrSpec[Extendable[Example]] { + return NewRefOrSpec[Extendable[Example]](nil, NewExtendable(&Example{})) +} + +// NewExampleRef creates Ref object. 
+func NewExampleRef(ref *Ref) *RefOrSpec[Extendable[Example]] { + return NewRefOrSpec[Extendable[Example]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/extensions.go b/vendor/github.com/sv-tools/openapi/spec/extensions.go new file mode 100644 index 00000000..c0a256e4 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/extensions.go @@ -0,0 +1,138 @@ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "gopkg.in/yaml.v3" +) + +const ExtensionPrefix = "x-" + +// Extendable allows extensions to the OpenAPI Schema. +// The field name MUST begin with `x-`, for example, `x-internal-id`. +// Field names beginning `x-oai-` and `x-oas-` are reserved for uses defined by the OpenAPI Initiative. +// The value can be null, a primitive, an array or an object. +// +// https://spec.openapis.org/oas/v3.1.0#specification-extensions +// +// Example: +// +// openapi: 3.1.0 +// info: +// title: Sample Pet Store App +// summary: A pet store manager. +// description: This is a sample server for a pet store. +// version: 1.0.1 +// x-build-data: 2006-01-02T15:04:05Z07:00 +// x-build-commit-id: dac33af14d0d4a5f1c226141042ca7cefc6aeb75 +type Extendable[T any] struct { + Spec *T `json:"-" yaml:"-"` + Extensions map[string]any `json:"-" yaml:"-"` +} + +// NewExtendable creates new Extendable object for given spec +func NewExtendable[T any](spec *T) *Extendable[T] { + ext := Extendable[T]{ + Spec: spec, + Extensions: make(map[string]any), + } + return &ext +} + +// MarshalJSON implements json.Marshaler interface. 
+func (o *Extendable[T]) MarshalJSON() ([]byte, error) { + var raw map[string]json.RawMessage + exts, err := json.Marshal(&o.Extensions) + if err != nil { + return nil, fmt.Errorf("%T.Extensions: %w", o.Spec, err) + } + if err := json.Unmarshal(exts, &raw); err != nil { + return nil, fmt.Errorf("%T(raw extensions): %w", o.Spec, err) + } + fields, err := json.Marshal(&o.Spec) + if err != nil { + return nil, fmt.Errorf("%T: %w", o.Spec, err) + } + if err := json.Unmarshal(fields, &raw); err != nil { + return nil, fmt.Errorf("%T(raw fields): %w", o.Spec, err) + } + data, err := json.Marshal(&raw) + if err != nil { + return nil, fmt.Errorf("%T(raw): %w", o.Spec, err) + } + return data, nil +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *Extendable[T]) UnmarshalJSON(data []byte) error { + var raw map[string]json.RawMessage + if err := json.Unmarshal(data, &raw); err != nil { + return fmt.Errorf("%T: %w", o.Spec, err) + } + o.Extensions = make(map[string]any) + for name, value := range raw { + if strings.HasPrefix(name, ExtensionPrefix) { + var v any + if err := json.Unmarshal(value, &v); err != nil { + return fmt.Errorf("%T.Extensions.%s: %w", o.Spec, name, err) + } + o.Extensions[name] = v + delete(raw, name) + } + } + fields, err := json.Marshal(&raw) + if err != nil { + return fmt.Errorf("%T(raw): %w", o.Spec, err) + } + if err := json.Unmarshal(fields, &o.Spec); err != nil { + return fmt.Errorf("%T: %w", o.Spec, err) + } + + return nil +} + +// MarshalYAML implements yaml.Marshaler interface. 
+func (o *Extendable[T]) MarshalYAML() (any, error) { + var raw map[string]any + exts, err := yaml.Marshal(&o.Extensions) + if err != nil { + return nil, fmt.Errorf("%T.Extensions: %w", o.Spec, err) + } + if err := yaml.Unmarshal(exts, &raw); err != nil { + return nil, fmt.Errorf("%T(raw extensions): %w", o.Spec, err) + } + fields, err := yaml.Marshal(&o.Spec) + if err != nil { + return nil, fmt.Errorf("%T: %w", o.Spec, err) + } + if err := yaml.Unmarshal(fields, &raw); err != nil { + return nil, fmt.Errorf("%T(raw fields): %w", o.Spec, err) + } + return raw, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. +func (o *Extendable[T]) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return fmt.Errorf("%T: %w", o.Spec, err) + } + o.Extensions = make(map[string]any) + for name, value := range raw { + if strings.HasPrefix(name, ExtensionPrefix) { + o.Extensions[name] = value + delete(raw, name) + } + } + fields, err := yaml.Marshal(&raw) + if err != nil { + return fmt.Errorf("%T(raw): %w", o.Spec, err) + } + if err := yaml.Unmarshal(fields, &o.Spec); err != nil { + return fmt.Errorf("%T: %w", o.Spec, err) + } + + return nil +} diff --git a/vendor/github.com/sv-tools/openapi/spec/external-docs.go b/vendor/github.com/sv-tools/openapi/spec/external-docs.go new file mode 100644 index 00000000..8eafc675 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/external-docs.go @@ -0,0 +1,24 @@ +package spec + +// ExternalDocs allows referencing an external resource for extended documentation. +// +// https://spec.openapis.org/oas/v3.1.0#external-documentation-object +// +// Example: +// +// description: Find more info here +// url: https://example.com +type ExternalDocs struct { + // A description of the target documentation. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description" yaml:"description"` + // REQUIRED. 
+ // The URL for the target documentation. + // This MUST be in the form of a URL. + URL string `json:"url" yaml:"url"` +} + +// NewExternalDocs creates ExternalDocs object. +func NewExternalDocs() *Extendable[ExternalDocs] { + return NewExtendable(&ExternalDocs{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/header.go b/vendor/github.com/sv-tools/openapi/spec/header.go new file mode 100644 index 00000000..448cd0b8 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/header.go @@ -0,0 +1,78 @@ +package spec + +// Header Object follows the structure of the Parameter Object with the some changes. +// +// https://spec.openapis.org/oas/v3.1.0#header-object +// +// Example: +// +// description: The number of allowed requests in the current period +// schema: +// type: integer +// +// All fields are copied from Parameter Object as is, except name and in fields. +type Header struct { + // Example of the parameter’s potential value. + // The example SHOULD match the specified schema and encoding properties if present. + // The example field is mutually exclusive of the examples field. + // Furthermore, if referencing a schema that contains an example, the example value SHALL override the example provided by the schema. + // To represent examples of media types that cannot naturally be represented in JSON or YAML, + // a string value can contain the example with escaping where necessary. + Example any `json:"example,omitempty" yaml:"example,omitempty"` + // The schema defining the type used for the parameter. + Schema *RefOrSpec[Schema] `json:"schema,omitempty" yaml:"schema,omitempty"` + // Examples of the parameter’s potential value. + // Each example SHOULD contain a value in the correct format as specified in the parameter encoding. + // The examples field is mutually exclusive of the example field. + // Furthermore, if referencing a schema that contains an example, the examples value SHALL override the example provided by the schema. 
+ Examples map[string]*RefOrSpec[Extendable[Example]] `json:"examples,omitempty" yaml:"examples,omitempty"` + // A map containing the representations for the parameter. + // The key is the media type and the value describes it. + // The map MUST only contain one entry. + Content map[string]*Extendable[MediaType] `json:"content,omitempty" yaml:"content,omitempty"` + // A brief description of the header. + // This could contain examples of use. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // Describes how the parameter value will be serialized depending on the type of the parameter value. + // Default values (based on value of in): + // for query - form; + // for path - simple; + // for header - simple; + // for cookie - form. + Style string `json:"style,omitempty" yaml:"style,omitempty"` + // When this is true, parameter values of type array or object generate separate parameters + // for each value of the array or key-value pair of the map. + // For other types of parameters this property has no effect. + // When style is form, the default value is true. + // For all other styles, the default value is false. + Explode bool `json:"explode,omitempty" yaml:"explode,omitempty"` + // Determines whether the parameter value SHOULD allow reserved characters, as defined by [RFC3986] + // :/?#[]@!$&'()*+,;= + // to be included without percent-encoding. + // This property only applies to parameters with an in value of query. + // The default value is false. + AllowReserved bool `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` + // Determines whether this header is mandatory. + // The property MAY be included and its default value is false. + Required bool `json:"required,omitempty" yaml:"required,omitempty"` + // Specifies that a header is deprecated and SHOULD be transitioned out of usage. + // Default value is false. 
+ Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` + // Sets the ability to pass empty-valued headers. + // This is valid only for query parameters and allows sending a parameter with an empty value. + // Default value is false. + // If style is used, and if behavior is n/a (cannot be serialized), the value of allowEmptyValue SHALL be ignored. + // Use of this property is NOT RECOMMENDED, as it is likely to be removed in a later revision. + AllowEmptyValue bool `json:"allowEmptyValue,omitempty" yaml:"allowEmptyValue,omitempty"` +} + +// NewHeaderSpec creates Header object. +func NewHeaderSpec() *RefOrSpec[Extendable[Header]] { + return NewRefOrSpec[Extendable[Header]](nil, NewExtendable(&Header{})) +} + +// NewHeaderRef creates Ref object. +func NewHeaderRef(ref *Ref) *RefOrSpec[Extendable[Header]] { + return NewRefOrSpec[Extendable[Header]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/info.go b/vendor/github.com/sv-tools/openapi/spec/info.go new file mode 100644 index 00000000..e1f890cf --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/info.go @@ -0,0 +1,46 @@ +package spec + +// Info provides metadata about the API. +// The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. +// +// https://spec.openapis.org/oas/v3.1.0#info-object +// +// Example: +// +// title: Sample Pet Store App +// summary: A pet store manager. +// description: This is a sample server for a pet store. +// termsOfService: https://example.com/terms/ +// contact: +// name: API Support +// url: https://www.example.com/support +// email: support@example.com +// license: +// name: Apache 2.0 +// url: https://www.apache.org/licenses/LICENSE-2.0.html +// version: 1.0.1 +type Info struct { + // REQUIRED. + // The title of the API. + Title string `json:"title" yaml:"title"` + // A short summary of the API. 
+ Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + // A description of the API. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // A URL to the Terms of Service for the API. + // This MUST be in the form of a URL. + TermsOfService string `json:"termsOfService,omitempty" yaml:"termsOfService,omitempty"` + // The contact information for the exposed API. + Contact *Extendable[Contact] `json:"contact,omitempty" yaml:"contact,omitempty"` + // The license information for the exposed API. + License *Extendable[License] `json:"license,omitempty" yaml:"license,omitempty"` + // REQUIRED. + // The version of the OpenAPI document (which is distinct from the OpenAPI Specification version or the API implementation version). + Version string `json:"version" yaml:"version"` +} + +// NewInfo creates Info object. +func NewInfo() *Extendable[Info] { + return NewExtendable(&Info{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/json_schema.go b/vendor/github.com/sv-tools/openapi/spec/json_schema.go new file mode 100644 index 00000000..8140e85c --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/json_schema.go @@ -0,0 +1,257 @@ +package spec + +// JsonSchemaTypeString +// +// https://json-schema.org/understanding-json-schema/reference/string.html#string +type JsonSchemaTypeString struct { + MinLength *int `json:"minLength,omitempty" yaml:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty" yaml:"maxLength,omitempty"` + Pattern string `json:"pattern,omitempty" yaml:"pattern,omitempty"` + Format string `json:"format,omitempty" yaml:"format,omitempty"` +} + +// JsonSchemaTypeNumber +// +// https://json-schema.org/understanding-json-schema/reference/numeric.html#numeric-types +type JsonSchemaTypeNumber struct { + // MultipleOf restricts the numbers to a multiple of a given number, using the multipleOf keyword. 
+ // It may be set to any positive number. + // + // https://json-schema.org/understanding-json-schema/reference/numeric.html#multiples + MultipleOf *int `json:"multipleOf,omitempty" yaml:"multipleOf,omitempty"` + // x ≥ minimum + Minimum *int `json:"minimum,omitempty" yaml:"minimum,omitempty"` + // x > exclusiveMinimum + ExclusiveMinimum *int `json:"exclusiveMinimum,omitempty" yaml:"exclusiveMinimum,omitempty"` + // x ≤ maximum + Maximum *int `json:"maximum,omitempty" yaml:"maximum,omitempty"` + // x < exclusiveMaximum + ExclusiveMaximum *int `json:"exclusiveMaximum,omitempty" yaml:"exclusiveMaximum,omitempty"` +} + +// JsonSchemaTypeObject +// +// https://json-schema.org/understanding-json-schema/reference/object.html#object +type JsonSchemaTypeObject struct { + // The properties (key-value pairs) on an object are defined using the properties keyword. + // The value of properties is an object, where each key is the name of a property and each value is + // a schema used to validate that property. + // Any property that doesn't match any of the property names in the properties keyword is ignored by this keyword. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#properties + Properties map[string]*RefOrSpec[Schema] `json:"properties,omitempty" yaml:"properties,omitempty"` + // Sometimes you want to say that, given a particular kind of property name, the value should match a particular schema. + // That’s where patternProperties comes in: it maps regular expressions to schemas. + // If a property name matches the given regular expression, the property value must validate against the corresponding schema. 
+ // + // https://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties + PatternProperties map[string]*RefOrSpec[Schema] `json:"patternProperties,omitempty" yaml:"patternProperties,omitempty"` + // The additionalProperties keyword is used to control the handling of extra stuff, that is, + // properties whose names are not listed in the properties keyword or match any of the regular expressions + // in the patternProperties keyword. + // By default any additional properties are allowed. + // + // The value of the additionalProperties keyword is a schema that will be used to validate any properties in the instance + // that are not matched by properties or patternProperties. + // Setting the additionalProperties schema to false means no additional properties will be allowed. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#additional-properties + AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"` + // The unevaluatedProperties keyword is similar to additionalProperties except that it can recognize properties declared in subschemas. + // So, the example from the previous section can be rewritten without the need to redeclare properties. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#unevaluated-properties + UnevaluatedProperties *BoolOrSchema `json:"unevaluatedProperties,omitempty" yaml:"unevaluatedProperties,omitempty"` + // The names of properties can be validated against a schema, irrespective of their values. + // This can be useful if you don’t want to enforce specific properties, but you want to make sure that + // the names of those properties follow a specific convention. + // You might, for example, want to enforce that all names are valid ASCII tokens so they can be used + // as attributes in a particular programming language. 
+ // + // https://json-schema.org/understanding-json-schema/reference/object.html#property-names + PropertyNames *RefOrSpec[Schema] `json:"propertyNames,omitempty" yaml:"propertyNames,omitempty"` + // The min number of properties on an object. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#size + MinProperties *int `json:"minProperties,omitempty" yaml:"minProperties,omitempty"` + // The max number of properties on an object. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#size + MaxProperties *int `json:"maxProperties,omitempty" yaml:"maxProperties,omitempty"` + // The required keyword takes an array of zero or more strings. + // Each of these strings must be unique. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#required-properties + Required []string `json:"required,omitempty" yaml:"required,omitempty"` +} + +// JsonSchemaTypeArray +// +// https://json-schema.org/understanding-json-schema/reference/array.html#array +type JsonSchemaTypeArray struct { + // List validation is useful for arrays of arbitrary length where each item matches the same schema. + // For this kind of array, set the items keyword to a single schema that will be used to validate all of the items in the array. + // + // https://json-schema.org/understanding-json-schema/reference/array.html#items + Items *BoolOrSchema `json:"items,omitempty" yaml:"items,omitempty"` + // https://json-schema.org/understanding-json-schema/reference/array.html#length + MaxItems *int `json:"maxItems,omitempty" yaml:"maxItems,omitempty"` + // The unevaluatedItems keyword is similar to unevaluatedProperties, but for items. 
+ // + // https://json-schema.org/understanding-json-schema/reference/array.html#unevaluated-items + UnevaluatedItems *BoolOrSchema `json:"unevaluatedItems,omitempty" yaml:"unevaluatedItems,omitempty"` + // While the items schema must be valid for every item in the array, the contains schema only needs + // to validate against one or more items in the array. + // + // https://json-schema.org/understanding-json-schema/reference/array.html#contains + Contains *RefOrSpec[Schema] `json:"contains,omitempty" yaml:"contains,omitempty"` + MinContains *int `json:"minContains,omitempty" yaml:"minContains,omitempty"` + MaxContains *int `json:"maxContains,omitempty" yaml:"maxContains,omitempty"` + // https://json-schema.org/understanding-json-schema/reference/array.html#length + MinItems *int `json:"minItems,omitempty" yaml:"minItems,omitempty"` + // A schema can ensure that each of the items in an array is unique. + // Simply set the uniqueItems keyword to true. + // + // https://json-schema.org/understanding-json-schema/reference/array.html#uniqueness + UniqueItems *bool `json:"uniqueItems,omitempty" yaml:"uniqueItems,omitempty"` + // The prefixItems is an array, where each item is a schema that corresponds to each index of the document’s array. + // That is, an array where the first element validates the first element of the input array, + // the second element validates the second element of the input array, etc. 
+ // + // https://json-schema.org/understanding-json-schema/reference/array.html#tuple-validation + PrefixItems []*RefOrSpec[Schema] `json:"prefixItems,omitempty" yaml:"prefixItems,omitempty"` +} + +// JsonSchemaGeneric +// +// https://json-schema.org/understanding-json-schema/reference/generic.html +type JsonSchemaGeneric struct { + Default any `json:"default,omitempty" yaml:"default,omitempty"` + Title string `json:"title,omitempty" yaml:"title,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // The const keyword is used to restrict a value to a single value. + // + // https://json-schema.org/understanding-json-schema/reference/generic.html#constant-values + Const string `json:"const,omitempty" yaml:"const,omitempty"` + // The $comment keyword is strictly intended for adding comments to a schema. + // Its value must always be a string. + // Unlike the annotations title, description, and examples, JSON schema implementations aren’t allowed + // to attach any meaning or behavior to it whatsoever, and may even strip them at any time. + // Therefore, they are useful for leaving notes to future editors of a JSON schema, + // but should not be used to communicate to users of the schema. + // + // https://json-schema.org/understanding-json-schema/reference/generic.html#comments + Comment string `json:"$comment,omitempty" yaml:"$comment,omitempty"` + // The enum keyword is used to restrict a value to a fixed set of values. + // It must be an array with at least one element, where each element is unique. 
+ // + // https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values + Enum []any `json:"enum,omitempty" yaml:"enum,omitempty"` + Examples []any `json:"examples,omitempty" yaml:"examples,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"` + WriteOnly bool `json:"writeOnly,omitempty" yaml:"writeOnly,omitempty"` + // The deprecated keyword is a boolean that indicates that the instance value the keyword applies to + // should not be used and may be removed in the future. + Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` +} + +// JsonSchemaMedia string-encoding non-JSON data +// +// https://json-schema.org/understanding-json-schema/reference/non_json_data.html +type JsonSchemaMedia struct { + // https://json-schema.org/understanding-json-schema/reference/non_json_data.html#contentschema + ContentSchema *RefOrSpec[Schema] `json:"contentSchema,omitempty" yaml:"contentSchema,omitempty"` + // The contentMediaType keyword specifies the MIME type of the contents of a string, as described in RFC 2046. + // There is a list of MIME types officially registered by the IANA, but the set of types supported will be + // application and operating system dependent. + // + // https://json-schema.org/understanding-json-schema/reference/non_json_data.html#contentmediatype + ContentMediaType string `json:"contentMediaType,omitempty" yaml:"contentMediaType,omitempty"` + // The contentEncoding keyword specifies the encoding used to store the contents, as specified in RFC 2054, part 6.1 and RFC 4648. 
+ // + // https://json-schema.org/understanding-json-schema/reference/non_json_data.html#contentencoding + ContentEncoding string `json:"contentEncoding,omitempty" yaml:"contentEncoding,omitempty"` +} + +// JsonSchemaComposition +// +// https://json-schema.org/understanding-json-schema/reference/combining.html +type JsonSchemaComposition struct { + // The not keyword declares that an instance validates if it doesn’t validate against the given subschema. + // + // https://json-schema.org/understanding-json-schema/reference/combining.html#not + Not *RefOrSpec[Schema] `json:"not,omitempty" yaml:"not,omitempty"` + // To validate against allOf, the given data must be valid against all of the given subschemas. + // + // https://json-schema.org/understanding-json-schema/reference/combining.html#allof + AllOf []*RefOrSpec[Schema] `json:"allOf,omitempty" yaml:"allOf,omitempty"` + // To validate against anyOf, the given data must be valid against any (one or more) of the given subschemas. + // + // https://json-schema.org/understanding-json-schema/reference/combining.html#anyof + AnyOf []*RefOrSpec[Schema] `json:"anyOf,omitempty" yaml:"anyOf,omitempty"` + // To validate against oneOf, the given data must be valid against exactly one of the given subschemas. + // + // https://json-schema.org/understanding-json-schema/reference/combining.html#oneof + OneOf []*RefOrSpec[Schema] `json:"oneOf,omitempty" yaml:"oneOf,omitempty"` +} + +// JsonSchemaConditionals Applying Subschemas Conditionally +// +// https://json-schema.org/understanding-json-schema/reference/conditionals.html +type JsonSchemaConditionals struct { + // The dependentRequired keyword conditionally requires that certain properties must be present if + // a given property is present in an object. + // For example, suppose we have a schema representing a customer. + // If you have their credit card number, you also want to ensure you have a billing address. 
+ // If you don’t have their credit card number, a billing address would not be required. + // We represent this dependency of one property on another using the dependentRequired keyword. + // The value of the dependentRequired keyword is an object. + // Each entry in the object maps from the name of a property, p, to an array of strings listing properties that + // are required if p is present. + // + // https://json-schema.org/understanding-json-schema/reference/conditionals.html#dependentrequired + DependentRequired map[string][]string `json:"dependentRequired,omitempty" yaml:"dependentRequired,omitempty"` + // The dependentSchemas keyword conditionally applies a subschema when a given property is present. + // This schema is applied in the same way allOf applies schemas. + // Nothing is merged or extended. + // Both schemas apply independently. + // + // https://json-schema.org/understanding-json-schema/reference/conditionals.html#dependentschemas + DependentSchemas map[string]*RefOrSpec[Schema] `json:"dependentSchemas,omitempty" yaml:"dependentSchemas,omitempty"` + + // https://json-schema.org/understanding-json-schema/reference/conditionals.html#if-then-else + If *RefOrSpec[Schema] `json:"if,omitempty" yaml:"if,omitempty"` + Then *RefOrSpec[Schema] `json:"then,omitempty" yaml:"then,omitempty"` + Else *RefOrSpec[Schema] `json:"else,omitempty" yaml:"else,omitempty"` +} + +type JsonSchemaCore struct { + // https://json-schema.org/understanding-json-schema/reference/schema.html#schema + Schema string `json:"$schema,omitempty" yaml:"$schema,omitempty"` + // https://json-schema.org/understanding-json-schema/structuring.html#id + ID string `json:"$id,omitempty" yaml:"$id,omitempty"` + // https://json-schema.org/understanding-json-schema/structuring.html#defs + Defs map[string]*RefOrSpec[Schema] `json:"$defs,omitempty" yaml:"$defs,omitempty"` + DynamicRef string `json:"$dynamicRef,omitempty" yaml:"$dynamicRef,omitempty"` + Vocabulary map[string]bool 
`json:"$vocabulary,omitempty" yaml:"$vocabulary,omitempty"` + DynamicAnchor string `json:"$dynamicAnchor,omitempty" yaml:"dynamicAnchor,omitempty"` + // https://json-schema.org/understanding-json-schema/reference/type.html + Type SingleOrArray[string] `json:"type,omitempty" yaml:"type,omitempty"` +} + +// JsonSchema fields +// +// https://json-schema.org/understanding-json-schema/index.html +// +// NOTE: all the other fields are available via Extensions property +type JsonSchema struct { + JsonSchemaTypeNumber `yaml:",inline"` + JsonSchemaConditionals `yaml:",inline"` + JsonSchemaTypeString `yaml:",inline"` + JsonSchemaMedia `yaml:",inline"` + JsonSchemaCore `yaml:",inline"` + JsonSchemaTypeArray `yaml:",inline"` + JsonSchemaTypeObject `yaml:",inline"` + JsonSchemaComposition `yaml:",inline"` + JsonSchemaGeneric `yaml:",inline"` +} diff --git a/vendor/github.com/sv-tools/openapi/spec/license.go b/vendor/github.com/sv-tools/openapi/spec/license.go new file mode 100644 index 00000000..460e2731 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/license.go @@ -0,0 +1,27 @@ +package spec + +// License information for the exposed API. +// +// https://spec.openapis.org/oas/v3.1.0#license-object +// +// Example: +// +// name: Apache 2.0 +// identifier: Apache-2.0 +type License struct { + // REQUIRED. + // The license name used for the API. + Name string `json:"name" yaml:"name"` + // An SPDX license expression for the API. + // The identifier field is mutually exclusive of the url field. + Identifier string `json:"identifier,omitempty" yaml:"identifier,omitempty"` + // A URL to the license used for the API. + // This MUST be in the form of a URL. + // The url field is mutually exclusive of the identifier field. + URL string `json:"url,omitempty" yaml:"url,omitempty"` +} + +// NewLicense creates License object. 
+func NewLicense() *Extendable[License] { + return NewExtendable(&License{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/link.go b/vendor/github.com/sv-tools/openapi/spec/link.go new file mode 100644 index 00000000..a34dd115 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/link.go @@ -0,0 +1,90 @@ +package spec + +// Link represents a possible design-time link for a response. +// The presence of a link does not guarantee the caller’s ability to successfully invoke it, +// rather it provides a known relationship and traversal mechanism between responses and other operations. +// Unlike dynamic links (i.e. links provided in the response payload), +// the OAS linking mechanism does not require link information in the runtime response. +// For computing links, and providing instructions to execute them, +// a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. +// +// https://spec.openapis.org/oas/v3.1.0#link-object +// +// Example: +// +// paths: +// /users/{id}: +// parameters: +// - name: id +// in: path +// required: true +// description: the user identifier, as userId +// schema: +// type: string +// get: +// responses: +// '200': +// description: the user being returned +// content: +// application/json: +// schema: +// type: object +// properties: +// uuid: # the unique user id +// type: string +// format: uuid +// links: +// address: +// # the target link operationId +// operationId: getUserAddress +// parameters: +// # get the `id` field from the request path parameter named `id` +// userId: $request.path.id +// # the path item of the linked operation +// /users/{userid}/address: +// parameters: +// - name: userid +// in: path +// required: true +// description: the user identifier, as userId +// schema: +// type: string +// # linked operation +// get: +// operationId: getUserAddress +// responses: +// '200': +// description: the user's address +type Link 
struct { + // A literal value or {expression} to use as a request body when calling the target operation. + RequestBody any `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` + // A map representing parameters to pass to an operation as specified with operationId or identified via operationRef. + // The key is the parameter name to be used, whereas the value can be a constant or an expression to be evaluated and + // passed to the linked operation. + // The parameter name can be qualified using the parameter location [{in}.]{name} for operations that use + // the same parameter name in different locations (e.g. path.id). + Parameters map[string]any `json:"parameters,omitempty" yaml:"parameters,omitempty"` + // A server object to be used by the target operation. + Server *Extendable[Server] `json:"server,omitempty" yaml:"server,omitempty"` + // A relative or absolute URI reference to an OAS operation. + // This field is mutually exclusive of the operationId field, and MUST point to an Operation Object. + // Relative operationRef values MAY be used to locate an existing Operation Object in the OpenAPI definition. + // See the rules for resolving Relative References. + OperationRef string `json:"operationRef,omitempty" yaml:"operationRef,omitempty"` + // The name of an existing, resolvable OAS operation, as defined with a unique operationId. + // This field is mutually exclusive of the operationRef field. + OperationId string `json:"operationId,omitempty" yaml:"operationId,omitempty"` + // A description of the link. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` +} + +// NewLinkSpec creates Link object. +func NewLinkSpec() *RefOrSpec[Extendable[Link]] { + return NewRefOrSpec[Extendable[Link]](nil, NewExtendable(&Link{})) +} + +// NewLinkRef creates Ref object. 
+func NewLinkRef(ref *Ref) *RefOrSpec[Extendable[Link]] { + return NewRefOrSpec[Extendable[Link]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/media_type.go b/vendor/github.com/sv-tools/openapi/spec/media_type.go new file mode 100644 index 00000000..700ad6ef --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/media_type.go @@ -0,0 +1,52 @@ +package spec + +// MediaType provides schema and examples for the media type identified by its key. +// +// https://spec.openapis.org/oas/v3.1.0#media-type-object +// +// Example: +// +// application/json: +// schema: +// $ref: "#/components/schemas/Pet" +// examples: +// cat: +// summary: An example of a cat +// value: +// name: Fluffy +// petType: Cat +// color: White +// gender: male +// breed: Persian +// dog: +// summary: An example of a dog with a cat's name +// value: +// name: Puma +// petType: Dog +// color: Black +// gender: Female +// breed: Mixed +// frog: +// $ref: "#/components/examples/frog-example" +type MediaType struct { + // The schema defining the content of the request, response, or parameter. + Schema *RefOrSpec[Schema] `json:"schema,omitempty" yaml:"schema,omitempty"` + // Example of the media type. The example object SHOULD be in the correct format as specified by the media type. + // The example field is mutually exclusive of the examples field. + // Furthermore, if referencing a schema which contains an example, the example value SHALL override the example provided by the schema. + Example any `json:"example,omitempty" yaml:"example,omitempty"` + // Examples of the parameter’s potential value. + // Each example SHOULD contain a value in the correct format as specified in the parameter encoding. + // The examples field is mutually exclusive of the example field. + // Furthermore, if referencing a schema that contains an example, the examples value SHALL override the example provided by the schema. 
+ Examples map[string]*RefOrSpec[Extendable[Example]] `json:"examples,omitempty" yaml:"examples,omitempty"` + // A map between a property name and its encoding information. + // The key, being the property name, MUST exist in the schema as a property. + // The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded. + Encoding map[string]*Extendable[Encoding] `json:"encoding,omitempty" yaml:"encoding,omitempty"` +} + +// NewMediaType creates MediaType object. +func NewMediaType() *Extendable[MediaType] { + return NewExtendable(&MediaType{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/oauth-flow.go b/vendor/github.com/sv-tools/openapi/spec/oauth-flow.go new file mode 100644 index 00000000..ea2615f3 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/oauth-flow.go @@ -0,0 +1,52 @@ +package spec + +// OAuthFlow configuration details for a supported OAuth Flow +// +// https://spec.openapis.org/oas/v3.1.0#oauth-flow-object +// +// Example: +// +// implicit: +// authorizationUrl: https://example.com/api/oauth/dialog +// scopes: +// write:pets: modify pets in your account +// read:pets: read your pets +// authorizationCode +// authorizationUrl: https://example.com/api/oauth/dialog +// scopes: +// write:pets: modify pets in your account +// read:pets: read your pets +type OAuthFlow struct { + // REQUIRED. + // The available scopes for the OAuth2 security scheme. + // A map between the scope name and a short description for it. + // The map MAY be empty. + // + // Applies To: oauth2 + Scopes map[string]string `json:"scopes,omitempty" yaml:"scopes,omitempty"` + // REQUIRED. + // The authorization URL to be used for this flow. + // This MUST be in the form of a URL. + // The OAuth2 standard requires the use of TLS. + // + // Applies To:oauth2 ("implicit", "authorizationCode") + AuthorizationURL string `json:"authorizationUrl,omitempty" yaml:"authorizationUrl,omitempty"` + // REQUIRED. 
+ // The token URL to be used for this flow. + // This MUST be in the form of a URL. + // The OAuth2 standard requires the use of TLS. + // + // Applies To: oauth2 ("password", "clientCredentials", "authorizationCode") + TokenURL string `json:"tokenUrl,omitempty" yaml:"tokenUrl,omitempty"` + // The URL to be used for obtaining refresh tokens. + // This MUST be in the form of a URL. + // The OAuth2 standard requires the use of TLS. + // + // Applies To: oauth2 + RefreshURL string `json:"refreshUrl,omitempty" yaml:"refreshUrl,omitempty"` +} + +// NewOAuthFlow creates OAuthFlow object +func NewOAuthFlow() *Extendable[OAuthFlow] { + return NewExtendable(&OAuthFlow{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/oauth-flows.go b/vendor/github.com/sv-tools/openapi/spec/oauth-flows.go new file mode 100644 index 00000000..7c61cdcd --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/oauth-flows.go @@ -0,0 +1,38 @@ +package spec + +// OAuthFlows allows configuration of the supported OAuth Flows. +// +// https://spec.openapis.org/oas/v3.1.0#oauth-flows-object +// +// Example: +// +// type: oauth2 +// flows: +// implicit: +// authorizationUrl: https://example.com/api/oauth/dialog +// scopes: +// write:pets: modify pets in your account +// read:pets: read your pets +// authorizationCode: +// authorizationUrl: https://example.com/api/oauth/dialog +// tokenUrl: https://example.com/api/oauth/token +// scopes: +// write:pets: modify pets in your account +// read:pets: read your pets +type OAuthFlows struct { + // Configuration for the OAuth Implicit flow. + Implicit *Extendable[OAuthFlow] `json:"implicit,omitempty" yaml:"implicit,omitempty"` + // Configuration for the OAuth Resource Owner Password flow. + Password *Extendable[OAuthFlow] `json:"password,omitempty" yaml:"password,omitempty"` + // Configuration for the OAuth Client Credentials flow. + // Previously called application in OpenAPI 2.0. 
+ ClientCredentials *Extendable[OAuthFlow] `json:"clientCredentials,omitempty" yaml:"clientCredentials,omitempty"` + // Configuration for the OAuth Authorization Code flow. + // Previously called accessCode in OpenAPI 2.0. + AuthorizationCode *Extendable[OAuthFlow] `json:"authorizationCode,omitempty" yaml:"authorizationCode,omitempty"` +} + +// NewOAuthFlows creates OAuthFlows object +func NewOAuthFlows() *Extendable[OAuthFlows] { + return NewExtendable(&OAuthFlows{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/openapi.go b/vendor/github.com/sv-tools/openapi/spec/openapi.go new file mode 100644 index 00000000..adce2dd2 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/openapi.go @@ -0,0 +1,60 @@ +package spec + +// OpenAPI is the root object of the OpenAPI document. +// +// https://spec.openapis.org/oas/v3.1.0#openapi-object +// +// Example: +// +// openapi: 3.1.0 +// info: +// title: Minimal OpenAPI example +// version: 1.0.0 +// paths: { } +type OpenAPI struct { + // An element to hold various schemas for the document. + Components *Extendable[Components] `json:"components,omitempty" yaml:"components,omitempty"` + // REQUIRED + // Provides metadata about the API. The metadata MAY be used by tooling as required. + Info *Extendable[Info] `json:"info" yaml:"info"` + // Additional external documentation. + ExternalDocs *Extendable[ExternalDocs] `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` + // Holds the relative paths to the individual endpoints and their operations. + // The path is appended to the URL from the Server Object in order to construct the full URL. + // The Paths MAY be empty, due to Access Control List (ACL) constraints. + Paths *Extendable[Paths] `json:"paths,omitempty" yaml:"paths,omitempty"` + // The incoming webhooks that MAY be received as part of this API and that the API consumer MAY choose to implement. 
+ // Closely related to the callbacks feature, this section describes requests initiated other than by an API call, + // for example by an out of band registration. + // The key name is a unique string to refer to each webhook, while the (optionally referenced) PathItem Object describes + // a request that may be initiated by the API provider and the expected responses. + WebHooks map[string]*RefOrSpec[Extendable[PathItem]] `json:"webhooks,omitempty" yaml:"webhooks,omitempty"` + // The default value for the $schema keyword within Schema Objects contained within this OAS document. + // This MUST be in the form of a URI. + JsonSchemaDialect string `json:"jsonSchemaDialect,omitempty" yaml:"jsonSchemaDialect,omitempty"` + // REQUIRED + // This string MUST be the version number of the OpenAPI Specification that the OpenAPI document uses. + // The openapi field SHOULD be used by tooling to interpret the OpenAPI document. + // This is not related to the API info.version string. + OpenAPI string `json:"openapi" yaml:"openapi"` + // A declaration of which security mechanisms can be used across the API. + // The list of values includes alternative security requirement objects that can be used. + // Only one of the security requirement objects need to be satisfied to authorize a request. + // Individual operations can override this definition. + // To make security optional, an empty security requirement ({}) can be included in the array. + Security []SecurityRequirement `json:"security,omitempty" yaml:"security,omitempty"` + // A list of tags used by the document with additional metadata. + // The order of the tags can be used to reflect on their order by the parsing tools. + // Not all tags that are used by the Operation Object must be declared. + // The tags that are not declared MAY be organized randomly or based on the tools’ logic. + // Each tag name in the list MUST be unique. 
+ Tags []*Extendable[Tag] `json:"tags,omitempty" yaml:"tags,omitempty"` + // An array of Server Objects, which provide connectivity information to a target server. + // If the servers property is not provided, or is an empty array, the default value would be a Server Object with a url value of /. + Servers []*Extendable[Server] `json:"servers,omitempty" yaml:"servers,omitempty"` +} + +// NewOpenAPI creates OpenAPI object. +func NewOpenAPI() *Extendable[OpenAPI] { + return NewExtendable(&OpenAPI{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/operation.go b/vendor/github.com/sv-tools/openapi/spec/operation.go new file mode 100644 index 00000000..4c2550b7 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/operation.go @@ -0,0 +1,103 @@ +package spec + +// Operation Describes a single API operation on a path. +// +// https://spec.openapis.org/oas/v3.1.0#operation-object +// +// Example: +// +// tags: +// - pet +// summary: Updates a pet in the store with form data +// operationId: updatePetWithForm +// parameters: +// - name: petId +// in: path +// description: ID of pet that needs to be updated +// required: true +// schema: +// type: string +// requestBody: +// content: +// 'application/x-www-form-urlencoded': +// schema: +// type: object +// properties: +// name: +// description: Updated name of the pet +// type: string +// status: +// description: Updated status of the pet +// type: string +// required: +// - status +// responses: +// '200': +// description: Pet updated. +// content: +// 'application/json': {} +// 'application/xml': {} +// '405': +// description: Method Not Allowed +// content: +// 'application/json': {} +// 'application/xml': {} +// security: +// - petstore_auth: +// - write:pets +// - read:pets +type Operation struct { + // The request body applicable for this operation. + // The requestBody is fully supported in HTTP methods where the HTTP 1.1 specification [RFC7231] has + // explicitly defined semantics for request bodies. 
+ // In other cases where the HTTP spec is vague (such as [GET]section-4.3.1), [HEAD]section-4.3.2) and + // [DELETE]section-4.3.5)), requestBody is permitted but does not have well-defined semantics and SHOULD be avoided if possible. + RequestBody *RefOrSpec[Extendable[RequestBody]] `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` + // The list of possible responses as they are returned from executing this operation. + Responses *Extendable[Responses] `json:"responses,omitempty" yaml:"responses,omitempty"` + // A map of possible out-of band callbacks related to the parent operation. + // The key is a unique identifier for the Callback Object. + // Each value in the map is a Callback Object that describes a request that may be initiated by the API provider and the expected responses. + Callbacks map[string]*RefOrSpec[Extendable[Callback]] `json:"callbacks,omitempty" yaml:"callbacks,omitempty"` + // Additional external documentation for this operation. + ExternalDocs *Extendable[ExternalDocs] `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` + // Unique string used to identify the operation. + // The id MUST be unique among all operations described in the API. + // The operationId value is case-sensitive. + // Tools and libraries MAY use the operationId to uniquely identify an operation, therefore, + // it is RECOMMENDED to follow common programming naming conventions. + OperationID string `json:"operationId,omitempty" yaml:"operationId,omitempty"` + // A short summary of what the operation does. + Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + // A verbose explanation of the operation behavior. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // A list of parameters that are applicable for this operation. + // If a parameter is already defined at the Path Item, the new definition will override it but can never remove it. 
+ // The list MUST NOT include duplicated parameters. + // A unique parameter is defined by a combination of a name and location. + // The list can use the Reference Object to link to parameters that are defined at the OpenAPI Object’s components/parameters. + Parameters []*RefOrSpec[Extendable[Parameter]] `json:"parameters,omitempty" yaml:"parameters,omitempty"` + // A list of tags for API documentation control. + // Tags can be used for logical grouping of operations by resources or any other qualifier. + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` + // A declaration of which security mechanisms can be used for this operation. + // The list of values includes alternative security requirement objects that can be used. + // Only one of the security requirement objects need to be satisfied to authorize a request. + // To make security optional, an empty security requirement ({}) can be included in the array. + // This definition overrides any declared top-level security. + // To remove a top-level security declaration, an empty array can be used. + Security []SecurityRequirement `json:"security,omitempty" yaml:"security,omitempty"` + // An alternative server array to service this operation. + // If an alternative server object is specified at the Path Item Object or Root level, it will be overridden by this value. + Servers []*Extendable[Server] `json:"servers,omitempty" yaml:"servers,omitempty"` + // Declares this operation to be deprecated. + // Consumers SHOULD refrain from usage of the declared operation. + // Default value is false. + Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` +} + +// NewOperation creates Operation object. 
+func NewOperation() *Extendable[Operation] { + return NewExtendable(&Operation{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/parameter.go b/vendor/github.com/sv-tools/openapi/spec/parameter.go new file mode 100644 index 00000000..d639cff4 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/parameter.go @@ -0,0 +1,112 @@ +package spec + +const ( + // InPath used together with Path Templating, where the parameter value is actually part of the operation’s URL. + // This does not include the host or base path of the API. + // For example, in /items/{itemId}, the path parameter is itemId. + // + // https://spec.openapis.org/oas/v3.1.0#parameter-locations + InPath = "path" + // InQuery used for parameters that are appended to the URL. + // For example, in /items?id=###, the query parameter is id. + // + // https://spec.openapis.org/oas/v3.1.0#parameter-locations + InQuery = "query" + // InHeader used as custom headers that are expected as part of the request. + // Note that [RFC7230] states header names are case insensitive. + // + // https://spec.openapis.org/oas/v3.1.0#parameter-locations + InHeader = "header" + // InCookie used to pass a specific cookie value to the API. + // + // https://spec.openapis.org/oas/v3.1.0#parameter-locations + InCookie = "cookie" +) + +// Parameter describes a single operation parameter. +// A unique parameter is defined by a combination of a name and location. +// +// https://spec.openapis.org/oas/v3.1.0#parameter-object +// +// Example: +// +// name: pet +// description: Pets operations +type Parameter struct { + // Example of the parameter’s potential value. + // The example SHOULD match the specified schema and encoding properties if present. + // The example field is mutually exclusive of the examples field. + // Furthermore, if referencing a schema that contains an example, the example value SHALL override the example provided by the schema. 
+ // To represent examples of media types that cannot naturally be represented in JSON or YAML, + // a string value can contain the example with escaping where necessary. + Example any `json:"example,omitempty" yaml:"example,omitempty"` + // A map containing the representations for the parameter. + // The key is the media type and the value describes it. + // The map MUST only contain one entry. + Content map[string]*Extendable[MediaType] `json:"content,omitempty" yaml:"content,omitempty"` + // Examples of the parameter’s potential value. + // Each example SHOULD contain a value in the correct format as specified in the parameter encoding. + // The examples field is mutually exclusive of the example field. + // Furthermore, if referencing a schema that contains an example, the examples value SHALL override the example provided by the schema. + Examples map[string]*RefOrSpec[Extendable[Example]] `json:"examples,omitempty" yaml:"examples,omitempty"` + // The schema defining the type used for the parameter. + Schema *RefOrSpec[Schema] `json:"schema,omitempty" yaml:"schema,omitempty"` + // REQUIRED. + // The location of the parameter. + // Possible values are "query", "header", "path" or "cookie". + In string `json:"in" yaml:"in"` + // A brief description of the parameter. + // This could contain examples of use. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // Describes how the parameter value will be serialized depending on the type of the parameter value. + // Default values (based on value of in): + // for query - form; + // for path - simple; + // for header - simple; + // for cookie - form. + Style string `json:"style,omitempty" yaml:"style,omitempty"` + // REQUIRED. + // The name of the parameter. + // Parameter names are case sensitive. 
+ // If in is "path", the name field MUST correspond to a template expression occurring within the path field in the Paths Object. + // See Path Templating for further information. + // If in is "header" and the name field is "Accept", "Content-Type" or "Authorization", the parameter definition SHALL be ignored. + // For all other cases, the name corresponds to the parameter name used by the in property. + Name string `json:"name" yaml:"name"` + // When this is true, parameter values of type array or object generate separate parameters + // for each value of the array or key-value pair of the map. + // For other types of parameters this property has no effect. + // When style is form, the default value is true. + // For all other styles, the default value is false. + Explode bool `json:"explode,omitempty" yaml:"explode,omitempty"` + // Determines whether the parameter value SHOULD allow reserved characters, as defined by [RFC3986] + // :/?#[]@!$&'()*+,;= + // to be included without percent-encoding. + // This property only applies to parameters with an in value of query. + // The default value is false. + AllowReserved bool `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` + // Sets the ability to pass empty-valued parameters. + // This is valid only for query parameters and allows sending a parameter with an empty value. + // Default value is false. + // If style is used, and if behavior is n/a (cannot be serialized), the value of allowEmptyValue SHALL be ignored. + // Use of this property is NOT RECOMMENDED, as it is likely to be removed in a later revision. + AllowEmptyValue bool `json:"allowEmptyValue,omitempty" yaml:"allowEmptyValue,omitempty"` + // Specifies that a parameter is deprecated and SHOULD be transitioned out of usage. + // Default value is false. + Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` + // Determines whether this parameter is mandatory. 
+ // If the parameter location is "path", this property is REQUIRED and its value MUST be true. + // Otherwise, the property MAY be included and its default value is false. + Required bool `json:"required,omitempty" yaml:"required,omitempty"` +} + +// NewParameterSpec creates Parameter object. +func NewParameterSpec() *RefOrSpec[Extendable[Parameter]] { + return NewRefOrSpec[Extendable[Parameter]](nil, NewExtendable(&Parameter{})) +} + +// NewParameterRef creates Ref object. +func NewParameterRef(ref *Ref) *RefOrSpec[Extendable[Parameter]] { + return NewRefOrSpec[Extendable[Parameter]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/path_item.go b/vendor/github.com/sv-tools/openapi/spec/path_item.go new file mode 100644 index 00000000..aa209d8e --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/path_item.go @@ -0,0 +1,80 @@ +package spec + +// PathItem describes the operations available on a single path. +// A Path Item MAY be empty, due to ACL constraints. +// The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. +// +// https://spec.openapis.org/oas/v3.1.0#path-item-object +// +// Example: +// +// get: +// description: Returns pets based on ID +// summary: Find pets by ID +// operationId: getPetsById +// responses: +// '200': +// description: pet response +// content: +// '*/*' : +// schema: +// type: array +// items: +// $ref: '#/components/schemas/Pet' +// default: +// description: error payload +// content: +// 'text/html': +// schema: +// $ref: '#/components/schemas/ErrorModel' +// parameters: +// - name: id +// in: path +// description: ID of pet to use +// required: true +// schema: +// type: array +// items: +// type: string +// style: simple +type PathItem struct { + // An optional, string summary, intended to apply to all operations in this path. 
+ Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + // An optional, string description, intended to apply to all operations in this path. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // A definition of a GET operation on this path. + Get *Extendable[Operation] `json:"get,omitempty" yaml:"get,omitempty"` + // A definition of a PUT operation on this path. + Put *Extendable[Operation] `json:"put,omitempty" yaml:"put,omitempty"` + // A definition of a POST operation on this path. + Post *Extendable[Operation] `json:"post,omitempty" yaml:"post,omitempty"` + // A definition of a DELETE operation on this path. + Delete *Extendable[Operation] `json:"delete,omitempty" yaml:"delete,omitempty"` + // A definition of a OPTIONS operation on this path. + Options *Extendable[Operation] `json:"options,omitempty" yaml:"options,omitempty"` + // A definition of a HEAD operation on this path. + Head *Extendable[Operation] `json:"head,omitempty" yaml:"head,omitempty"` + // A definition of a PATCH operation on this path. + Patch *Extendable[Operation] `json:"patch,omitempty" yaml:"patch,omitempty"` + // A definition of a TRACE operation on this path. + Trace *Extendable[Operation] `json:"trace,omitempty" yaml:"trace,omitempty"` + // An alternative server array to service all operations in this path. + Servers []*Extendable[Server] `json:"servers,omitempty" yaml:"servers,omitempty"` + // A list of parameters that are applicable for all the operations described under this path. + // These parameters can be overridden at the operation level, but cannot be removed there. + // The list MUST NOT include duplicated parameters. + // A unique parameter is defined by a combination of a name and location. + // The list can use the Reference Object to link to parameters that are defined at the OpenAPI Object’s components/parameters. 
+ Parameters []*RefOrSpec[Extendable[Parameter]] `json:"parameters,omitempty" yaml:"parameters,omitempty"` +} + +// NewPathItemSpec creates PathItem object. +func NewPathItemSpec() *RefOrSpec[Extendable[PathItem]] { + return NewRefOrSpec[Extendable[PathItem]](nil, NewExtendable(&PathItem{})) +} + +// NewPathItemRef creates Ref object. +func NewPathItemRef(ref *Ref) *RefOrSpec[Extendable[PathItem]] { + return NewRefOrSpec[Extendable[PathItem]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/paths.go b/vendor/github.com/sv-tools/openapi/spec/paths.go new file mode 100644 index 00000000..a4a714d4 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/paths.go @@ -0,0 +1,65 @@ +package spec + +import ( + "encoding/json" + + "gopkg.in/yaml.v3" +) + +// Paths holds the relative paths to the individual endpoints and their operations. +// The path is appended to the URL from the Server Object in order to construct the full URL. +// The Paths MAY be empty, due to Access Control List (ACL) constraints. +// +// https://spec.openapis.org/oas/v3.1.0#paths-object +// +// Example: +// +// /pets: +// get: +// description: Returns all pets from the system that the user has access to +// responses: +// '200': +// description: A list of pets. +// content: +// application/json: +// schema: +// type: array +// items: +// $ref: '#/components/schemas/pet' +type Paths struct { + // A relative path to an individual endpoint. + // The field name MUST begin with a forward slash (/). + // The path is appended (no relative URL resolution) to the expanded URL + // from the Server Object’s url field in order to construct the full URL. + // Path templating is allowed. + // When matching URLs, concrete (non-templated) paths would be matched before their templated counterparts. + // Templated paths with the same hierarchy but different templated names MUST NOT exist as they are identical. + // In case of ambiguous matching, it’s up to the tooling to decide which one to use. 
+ Paths map[string]*RefOrSpec[Extendable[PathItem]] `json:"-" yaml:"-"` +} + +// NewPaths creates Paths object. +func NewPaths() *Extendable[Paths] { + p := map[string]*RefOrSpec[Extendable[PathItem]]{} + return NewExtendable(&Paths{Paths: p}) +} + +// MarshalJSON implements json.Marshaler interface. +func (o *Paths) MarshalJSON() ([]byte, error) { + return json.Marshal(&o.Paths) +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. +func (o *Paths) UnmarshalYAML(node *yaml.Node) error { + return node.Decode(&o.Paths) +} + +// MarshalYAML implements yaml.Marshaler interface. +func (o *Paths) MarshalYAML() (any, error) { + return o.Paths, nil +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *Paths) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &o.Paths) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/ref.go b/vendor/github.com/sv-tools/openapi/spec/ref.go new file mode 100644 index 00000000..70871fdf --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/ref.go @@ -0,0 +1,111 @@ +package spec + +import ( + "encoding/json" + "fmt" + + "gopkg.in/yaml.v3" +) + +// Ref is a simple object to allow referencing other components in the OpenAPI document, internally and externally. +// The $ref string value contains a URI [RFC3986], which identifies the location of the value being referenced. +// See the rules for resolving Relative References. +// +// https://spec.openapis.org/oas/v3.1.0#reference-object +// +// Example: +// +// $ref: '#/components/schemas/Pet' +type Ref struct { + // REQUIRED. + // The reference identifier. + // This MUST be in the form of a URI. + Ref string `json:"$ref" yaml:"$ref"` + // A short summary which by default SHOULD override that of the referenced component. + // If the referenced object-type does not allow a summary field, then this field has no effect. 
+ Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + // A description which by default SHOULD override that of the referenced component. + // CommonMark syntax MAY be used for rich text representation. + // If the referenced object-type does not allow a description field, then this field has no effect. + Description string `json:"description,omitempty" yaml:"description,omitempty"` +} + +// NewRef creates an object of Ref type. +func NewRef(ref string) *Ref { + return &Ref{ + Ref: ref, + } +} + +// RefOrSpec holds either Ref or any OpenAPI spec type. +// +// NOTE: The Ref object takes precedent over Spec if using json or yaml Marshal and Unmarshal functions. +type RefOrSpec[T any] struct { + Ref *Ref `json:"-" yaml:"-"` + Spec *T `json:"-" yaml:"-"` +} + +// NewRefOrSpec creates an object of RefOrSpec type for either Ref or Spec +func NewRefOrSpec[T any](ref *Ref, spec *T) *RefOrSpec[T] { + o := RefOrSpec[T]{} + switch { + case ref != nil: + o.Ref = ref + case spec != nil: + o.Spec = spec + } + return &o +} + +// MarshalJSON implements json.Marshaler interface. +func (o *RefOrSpec[T]) MarshalJSON() ([]byte, error) { + var v any + if o.Ref != nil { + v = o.Ref + } else { + v = o.Spec + } + data, err := json.Marshal(&v) + if err != nil { + return nil, fmt.Errorf("%T: %w", o.Spec, err) + } + return data, nil +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *RefOrSpec[T]) UnmarshalJSON(data []byte) error { + if json.Unmarshal(data, &o.Ref) == nil && o.Ref.Ref != "" { + o.Spec = nil + return nil + } + + o.Ref = nil + if err := json.Unmarshal(data, &o.Spec); err != nil { + return fmt.Errorf("%T: %w", o.Spec, err) + } + return nil +} + +// MarshalYAML implements yaml.Marshaler interface. +func (o *RefOrSpec[T]) MarshalYAML() (any, error) { + var v any + if o.Ref != nil { + v = o.Ref + } else { + v = o.Spec + } + return v, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. 
+func (o *RefOrSpec[T]) UnmarshalYAML(node *yaml.Node) error { + if node.Decode(&o.Ref) == nil && o.Ref.Ref != "" { + return nil + } + + o.Ref = nil + if err := node.Decode(&o.Spec); err != nil { + return fmt.Errorf("%T: %w", o.Spec, err) + } + return nil +} diff --git a/vendor/github.com/sv-tools/openapi/spec/request_body.go b/vendor/github.com/sv-tools/openapi/spec/request_body.go new file mode 100644 index 00000000..03b1f7b3 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/request_body.go @@ -0,0 +1,58 @@ +package spec + +// RequestBody describes a single request body. +// +// https://spec.openapis.org/oas/v3.1.0#request-body-object +// +// Example: +// +// description: user to add to the system +// content: +// 'application/json': +// schema: +// $ref: '#/components/schemas/User' +// examples: +// user: +// summary: User Example +// externalValue: 'https://foo.bar/examples/user-example.json' +// 'application/xml': +// schema: +// $ref: '#/components/schemas/User' +// examples: +// user: +// summary: User example in XML +// externalValue: 'https://foo.bar/examples/user-example.xml' +// 'text/plain': +// examples: +// user: +// summary: User example in Plain text +// externalValue: 'https://foo.bar/examples/user-example.txt' +// '*/*': +// examples: +// user: +// summary: User example in other format +// externalValue: 'https://foo.bar/examples/user-example.whatever' +type RequestBody struct { + // REQUIRED. + // The content of the request body. + // The key is a media type or [media type range]appendix-D) and the value describes it. + // For requests that match multiple keys, only the most specific key is applicable. e.g. text/plain overrides text/* + Content map[string]*Extendable[MediaType] `json:"content,omitempty" yaml:"content,omitempty"` + // A brief description of the request body. + // This could contain examples of use. + // CommonMark syntax MAY be used for rich text representation. 
+ Description string `json:"description,omitempty" yaml:"description,omitempty"` + // Determines if the request body is required in the request. + // Defaults to false. + Required bool `json:"required,omitempty" yaml:"required,omitempty"` +} + +// NewRequestBodySpec creates RequestBody object. +func NewRequestBodySpec() *RefOrSpec[Extendable[RequestBody]] { + return NewRefOrSpec[Extendable[RequestBody]](nil, NewExtendable(&RequestBody{})) +} + +// NewRequestBodyRef creates Ref object. +func NewRequestBodyRef(ref *Ref) *RefOrSpec[Extendable[RequestBody]] { + return NewRefOrSpec[Extendable[RequestBody]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/response.go b/vendor/github.com/sv-tools/openapi/spec/response.go new file mode 100644 index 00000000..d454c096 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/response.go @@ -0,0 +1,42 @@ +package spec + +// Response describes a single response from an API Operation, including design-time, static links to operations based on the response. +// +// https://spec.openapis.org/oas/v3.1.0#response-object +// +// Example: +// +// description: A complex object array response +// content: +// application/json: +// schema: +// type: array +// items: +// $ref: '#/components/schemas/VeryComplexType' +type Response struct { + // Maps a header name to its definition. + // [RFC7230] states header names are case insensitive. + // If a response header is defined with the name "Content-Type", it SHALL be ignored. + Headers map[string]*RefOrSpec[Extendable[Header]] `json:"headers,omitempty" yaml:"headers,omitempty"` + // A map containing descriptions of potential response payloads. + // The key is a media type or [media type range]appendix-D) and the value describes it. + // For responses that match multiple keys, only the most specific key is applicable. e.g. 
text/plain overrides text/* + Content map[string]*Extendable[MediaType] `json:"content,omitempty" yaml:"content,omitempty"` + // A map of operations links that can be followed from the response. + // The key of the map is a short name for the link, following the naming constraints of the names for Component Objects. + Links map[string]*RefOrSpec[Extendable[Link]] `json:"links,omitempty" yaml:"links,omitempty"` + // REQUIRED. + // A description of the response. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` +} + +// NewResponseSpec creates Response object. +func NewResponseSpec() *RefOrSpec[Extendable[Response]] { + return NewRefOrSpec[Extendable[Response]](nil, NewExtendable(&Response{})) +} + +// NewResponseRef creates Ref object. +func NewResponseRef(ref *Ref) *RefOrSpec[Extendable[Response]] { + return NewRefOrSpec[Extendable[Response]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/responses.go b/vendor/github.com/sv-tools/openapi/spec/responses.go new file mode 100644 index 00000000..aecfbdd2 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/responses.go @@ -0,0 +1,130 @@ +package spec + +import ( + "encoding/json" + + "gopkg.in/yaml.v3" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. +// The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. +// However, documentation is expected to cover a successful operation response and any known errors. +// The default MAY be used as a default response object for all HTTP codes that are not covered individually by the Responses Object. +// The Responses Object MUST contain at least one response code, and if only one response code is provided +// it SHOULD be the response for a successful operation call. 
+// +// https://spec.openapis.org/oas/v3.1.0#responses-object +// +// Example: +// +// '200': +// description: a pet to be returned +// content: +// application/json: +// schema: +// $ref: '#/components/schemas/Pet' +// default: +// description: Unexpected error +// content: +// application/json: +// schema: +// $ref: '#/components/schemas/ErrorModel' +type Responses struct { + // The documentation of responses other than the ones declared for specific HTTP response codes. + // Use this field to cover undeclared responses. + Default *RefOrSpec[Extendable[Response]] `json:"default,omitempty" yaml:"default,omitempty"` + // Any HTTP status code can be used as the property name, but only one property per code, + // to describe the expected response for that HTTP status code. + // This field MUST be enclosed in quotation marks (for example, “200”) for compatibility between JSON and YAML. + // To define a range of response codes, this field MAY contain the uppercase wildcard character X. + // For example, 2XX represents all response codes between [200-299]. + // Only the following range definitions are allowed: 1XX, 2XX, 3XX, 4XX, and 5XX. + // If a response is defined using an explicit code, the explicit code definition takes precedence over the range definition for that code. + Response map[string]*RefOrSpec[Extendable[Response]] `json:"-" yaml:"-"` +} + +// NewResponses creates Paths object. +func NewResponses() *Extendable[Responses] { + return NewExtendable(&Responses{}) +} + +// MarshalJSON implements json.Marshaler interface. 
+func (o *Responses) MarshalJSON() ([]byte, error) { + var raw map[string]json.RawMessage + data, err := json.Marshal(&o.Response) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, &raw); err != nil { + return nil, err + } + + if o.Default != nil { + data, err = json.Marshal(&o.Default) + if err != nil { + return nil, err + } + raw["default"] = data + } + return json.Marshal(&raw) +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *Responses) UnmarshalJSON(data []byte) error { + var raw map[string]json.RawMessage + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if v, ok := raw["default"]; ok { + if err := json.Unmarshal(v, &o.Default); err != nil { + return err + } + delete(raw, "default") + } + data, err := json.Marshal(&raw) + if err != nil { + return err + } + return json.Unmarshal(data, &o.Response) +} + +// MarshalYAML implements yaml.Marshaler interface. +func (o *Responses) MarshalYAML() (any, error) { + var raw map[string]any + data, err := yaml.Marshal(&o.Response) + if err != nil { + return nil, err + } + if err := yaml.Unmarshal(data, &raw); err != nil { + return nil, err + } + + if o.Default != nil { + raw["default"] = o.Default + } + return raw, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. 
+func (o *Responses) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return err + } + if v, ok := raw["default"]; ok { + data, err := yaml.Marshal(&v) + if err != nil { + return err + } + if err := yaml.Unmarshal(data, &o.Default); err != nil { + return err + } + delete(raw, "default") + } + data, err := yaml.Marshal(&raw) + if err != nil { + return err + } + return yaml.Unmarshal(data, &o.Response) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/schema.go b/vendor/github.com/sv-tools/openapi/spec/schema.go new file mode 100644 index 00000000..70fb4ad9 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/schema.go @@ -0,0 +1,195 @@ +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + + "gopkg.in/yaml.v3" +) + +// The Schema Object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is a superset of the JSON Schema Specification Draft 2020-12. +// For more information about the properties, see JSON Schema Core and JSON Schema Validation. +// Unless stated otherwise, the property definitions follow those of JSON Schema and do not add any additional semantics. +// Where JSON Schema indicates that behavior is defined by the application (e.g. for annotations), +// OAS also defers the definition of semantics to the application consuming the OpenAPI document. +// +// https://spec.openapis.org/oas/v3.1.0#schema-object +type Schema struct { + JsonSchema `yaml:",inline"` + + // Adds support for polymorphism. + // The discriminator is an object name that is used to differentiate between other schemas which may satisfy the payload description. + // See Composition and Inheritance for more details. + Discriminator *Discriminator `json:"discriminator,omitempty" yaml:"discriminator,omitempty"` + // Additional external documentation for this tag. 
+ // xml + XML *Extendable[XML] `json:"xml,omitempty" yaml:"xml,omitempty"` + // Additional external documentation for this schema. + ExternalDocs *Extendable[ExternalDocs] `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` + // A free-form property to include an example of an instance for this schema. + // To represent examples that cannot be naturally represented in JSON or YAML, a string value can be used to + // contain the example with escaping where necessary. + // + // Deprecated: The example property has been deprecated in favor of the JSON Schema examples keyword. + // Use of example is discouraged, and later versions of this specification may remove it. + Example any `json:"example,omitempty" yaml:"example,omitempty"` + + Extensions map[string]any `json:"-" yaml:"-"` +} + +// NewSchemaSpec creates Schema object. +func NewSchemaSpec() *RefOrSpec[Schema] { + return NewRefOrSpec[Schema](nil, &Schema{}) +} + +// NewSchemaRef creates Ref object. +func NewSchemaRef(ref *Ref) *RefOrSpec[Schema] { + return NewRefOrSpec[Schema](ref, nil) +} + +// returns the list of public fields for given tag and ignores `-` names +func getFields(t reflect.Type, tag string) map[string]struct{} { + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil + } + n := t.NumField() + ret := make(map[string]struct{}) + for i := 0; i < n; i++ { + f := t.Field(i) + if !f.IsExported() { + continue + } + if f.Anonymous { + sub := getFields(f.Type, tag) + for n, v := range sub { + ret[n] = v + } + continue + } + name, _, _ := strings.Cut(f.Tag.Get(tag), ",") + if name == "-" { + continue + } + if name == "" { + name = f.Name + } + ret[name] = struct{}{} + } + if len(ret) == 0 { + return nil + } + return ret +} + +type intSchema Schema // needed to avoid recursion in marshal/unmarshal + +// MarshalJSON implements json.Marshaler interface. 
+func (o *Schema) MarshalJSON() ([]byte, error) { + var raw map[string]json.RawMessage + exts, err := json.Marshal(&o.Extensions) + if err != nil { + return nil, fmt.Errorf("%T.Extensions: %w", o, err) + } + if err := json.Unmarshal(exts, &raw); err != nil { + return nil, fmt.Errorf("%T(raw extensions): %w", o, err) + } + s := intSchema(*o) + fields, err := json.Marshal(&s) + if err != nil { + return nil, fmt.Errorf("%T: %w", o, err) + } + if err := json.Unmarshal(fields, &raw); err != nil { + return nil, fmt.Errorf("%T(raw fields): %w", o, err) + } + data, err := json.Marshal(&raw) + if err != nil { + return nil, fmt.Errorf("%T(raw): %w", o, err) + } + return data, nil +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *Schema) UnmarshalJSON(data []byte) error { + var raw map[string]json.RawMessage + if err := json.Unmarshal(data, &raw); err != nil { + return fmt.Errorf("%T: %w", o, err) + } + exts := make(map[string]any) + keys := getFields(reflect.TypeOf(o), "json") + for name, value := range raw { + if _, ok := keys[name]; !ok { + var v any + if err := json.Unmarshal(value, &v); err != nil { + return fmt.Errorf("%T.Extensions.%s: %w", o, name, err) + } + exts[name] = v + delete(raw, name) + } + } + fields, err := json.Marshal(&raw) + if err != nil { + return fmt.Errorf("%T(raw): %w", o, err) + } + var s intSchema + if err := json.Unmarshal(fields, &s); err != nil { + return fmt.Errorf("%T: %w", o, err) + } + s.Extensions = exts + *o = Schema(s) + return nil +} + +// MarshalYAML implements yaml.Marshaler interface. 
+func (o *Schema) MarshalYAML() (any, error) { + var raw map[string]any + exts, err := yaml.Marshal(&o.Extensions) + if err != nil { + return nil, fmt.Errorf("%T.Extensions: %w", o, err) + } + if err := yaml.Unmarshal(exts, &raw); err != nil { + return nil, fmt.Errorf("%T(raw extensions): %w", o, err) + } + s := intSchema(*o) + fields, err := yaml.Marshal(&s) + if err != nil { + return nil, fmt.Errorf("%T: %w", o, err) + } + if err := yaml.Unmarshal(fields, &raw); err != nil { + return nil, fmt.Errorf("%T(raw fields): %w", o, err) + } + return raw, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. +func (o *Schema) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return fmt.Errorf("%T: %w", o, err) + } + exts := make(map[string]any) + keys := getFields(reflect.TypeOf(o), "json") + for name, value := range raw { + if _, ok := keys[name]; !ok { + exts[name] = value + delete(raw, name) + } + } + fields, err := yaml.Marshal(&raw) + if err != nil { + return fmt.Errorf("%T(raw): %w", o, err) + } + var s intSchema + if err := yaml.Unmarshal(fields, &s); err != nil { + return fmt.Errorf("%T: %w", o, err) + } + s.Extensions = exts + *o = Schema(s) + return nil +} diff --git a/vendor/github.com/sv-tools/openapi/spec/security-requirement.go b/vendor/github.com/sv-tools/openapi/spec/security-requirement.go new file mode 100644 index 00000000..814f651d --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/security-requirement.go @@ -0,0 +1,21 @@ +package spec + +// SecurityRequirement is the lists of the required security schemes to execute this operation. +// The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. +// Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. 
+// This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. +// When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, +// only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. +// +// https://spec.openapis.org/oas/v3.1.0#security-requirement-object +// +// Example: +// +// api_key: [] +type SecurityRequirement map[string][]string + +// NewSecurityRequirement creates SecurityRequirement object. +func NewSecurityRequirement() SecurityRequirement { + o := make(map[string][]string) + return o +} diff --git a/vendor/github.com/sv-tools/openapi/spec/security-scheme.go b/vendor/github.com/sv-tools/openapi/spec/security-scheme.go new file mode 100644 index 00000000..3267c492 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/security-scheme.go @@ -0,0 +1,85 @@ +package spec + +const ( + TypeApiKey = "apiKey" + TypeHTTP = "http" + TypeMutualTLS = "mutualTLS" + TypeOAuth2 = "oauth2" + TypeOpenIDConnect = "openIdConnect" +) + +// SecurityScheme defines a security scheme that can be used by the operations. +// Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), +// mutual TLS (use of a client certificate), OAuth2’s common flows (implicit, password, client credentials and authorization code) +// as defined in [RFC6749], and OpenID Connect Discovery. +// Please note that as of 2020, the implicit flow is about to be deprecated by OAuth 2.0 Security Best Current Practice. +// Recommended for most use case is Authorization Code Grant flow with PKCE. 
+// +// https://spec.openapis.org/oas/v3.1.0#security-scheme-object +// +// Example: +// +// type: oauth2 +// flows: +// implicit: +// authorizationUrl: https://example.com/api/oauth/dialog +// scopes: +// write:pets: modify pets in your account +// read:pets: read your pets +type SecurityScheme struct { + // REQUIRED. + // The type of the security scheme. + // Valid values are "apiKey", "http", "mutualTLS", "oauth2", "openIdConnect". + // + // Applies To: any + Type string `json:"type" yaml:"type"` + // A description for security scheme. + // CommonMark syntax MAY be used for rich text representation. + // + // Applies To: any + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // REQUIRED. + // The name of the header, query or cookie parameter to be used. + // + // Applies To: apiKey + Name string `json:"name,omitempty" yaml:"name,omitempty"` + // REQUIRED. + // The location of the API key. + //Valid values are "query", "header" or "cookie". + // + // Applies To: apiKey + In string `json:"in,omitempty" yaml:"in,omitempty"` + // REQUIRED. + // The name of the HTTP Authorization scheme to be used in the Authorization header as defined in [RFC7235]. + // The values used SHOULD be registered in the IANA Authentication Scheme registry. + // + // Applies To: http + Scheme string `json:"scheme,omitempty" yaml:"scheme,omitempty"` + // A hint to the client to identify how the bearer token is formatted. + // Bearer tokens are usually generated by an authorization server, so this information is primarily for documentation purposes. + // + // Applies To: http ("bearer") + BearerFormat string `json:"bearerFormat,omitempty" yaml:"bearerFormat,omitempty"` + // REQUIRED. + // An object containing configuration information for the flow types supported. + // + // Applies To: oauth2 + Flows *Extendable[OAuthFlows] `json:"flows,omitempty" yaml:"flows,omitempty"` + // REQUIRED. + // OpenId Connect URL to discover OAuth2 configuration values. 
+ // This MUST be in the form of a URL. + // The OpenID Connect standard requires the use of TLS. + // + // Applies To: openIdConnect + OpenIDConnectURL string `json:"openIdConnectUrl,omitempty" yaml:"openIdConnectUrl,omitempty"` +} + +// NewSecuritySchemeSpec creates SecurityScheme object. +func NewSecuritySchemeSpec() *RefOrSpec[Extendable[SecurityScheme]] { + return NewRefOrSpec[Extendable[SecurityScheme]](nil, NewExtendable(&SecurityScheme{})) +} + +// NewSecuritySchemeRef creates a Ref object. +func NewSecuritySchemeRef(ref *Ref) *RefOrSpec[Extendable[SecurityScheme]] { + return NewRefOrSpec[Extendable[SecurityScheme]](ref, nil) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/server.go b/vendor/github.com/sv-tools/openapi/spec/server.go new file mode 100644 index 00000000..6768346d --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/server.go @@ -0,0 +1,34 @@ +package spec + +// Server is an object representing a Server. +// +// https://spec.openapis.org/oas/v3.1.0#server-object +// +// Example: +// +// servers: +// - url: https://development.gigantic-server.com/v1 +// description: Development server +// - url: https://staging.gigantic-server.com/v1 +// description: Staging server +// - url: https://api.gigantic-server.com/v1 +// description: Production server +type Server struct { + // A map between a variable name and its value. + // The value is used for substitution in the server’s URL template. + Variables map[string]*Extendable[ServerVariable] `json:"variables,omitempty" yaml:"variables,omitempty"` + // REQUIRED. + // A URL to the target host. + // This URL supports Server Variables and MAY be relative, to indicate that the host location is relative + // to the location where the OpenAPI document is being served. + // Variable substitutions will be made when a variable is named in {brackets}. + URL string `json:"url" yaml:"url"` + // An optional string describing the host designated by the URL. 
+ // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` +} + +// NewServer creates Server object. +func NewServer() *Extendable[Server] { + return NewExtendable(&Server{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/server_variable.go b/vendor/github.com/sv-tools/openapi/spec/server_variable.go new file mode 100644 index 00000000..526706d7 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/server_variable.go @@ -0,0 +1,24 @@ +package spec + +// ServerVariable is an object representing a Server Variable for server URL template substitution. +// +// https://spec.openapis.org/oas/v3.1.0#server-variable-object +type ServerVariable struct { + // REQUIRED. + // The default value to use for substitution, which SHALL be sent if an alternate value is not supplied. + // Note this behavior is different than the Schema Object’s treatment of default values, + // because in those cases parameter values are optional. + // If the enum is defined, the value MUST exist in the enum’s values. + Default string `json:"default" yaml:"default"` + // An optional description for the server variable. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // An enumeration of string values to be used if the substitution options are from a limited set. + // The array MUST NOT be empty. + Enum []string `json:"enum,omitempty" yaml:"enum,omitempty"` +} + +// NewServerVariable creates ServerVariable object. 
+func NewServerVariable() *Extendable[ServerVariable] { + return NewExtendable(&ServerVariable{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/single_or_array.go b/vendor/github.com/sv-tools/openapi/spec/single_or_array.go new file mode 100644 index 00000000..721a3ed8 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/single_or_array.go @@ -0,0 +1,61 @@ +package spec + +import ( + "encoding/json" + + "gopkg.in/yaml.v3" +) + +// SingleOrArray holds list or single value +type SingleOrArray[T any] []T + +// NewSingleOrArray creates SingleOrArray object. +func NewSingleOrArray[T any](v ...T) SingleOrArray[T] { + return append([]T{}, v...) +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (o *SingleOrArray[T]) UnmarshalJSON(data []byte) error { + var ret []T + if json.Unmarshal(data, &ret) != nil { + var s T + if err := json.Unmarshal(data, &s); err != nil { + return err + } + ret = []T{s} + } + *o = ret + return nil +} + +// MarshalJSON implements json.Marshaler interface. +func (o SingleOrArray[T]) MarshalJSON() ([]byte, error) { + var v any = []T(o) + if len(o) == 1 { + v = o[0] + } + return json.Marshal(&v) +} + +// UnmarshalYAML implements yaml.Unmarshaler interface. +func (o *SingleOrArray[T]) UnmarshalYAML(node *yaml.Node) error { + var ret []T + if node.Decode(&ret) != nil { + var s T + if err := node.Decode(&s); err != nil { + return err + } + ret = []T{s} + } + *o = ret + return nil +} + +// MarshalYAML implements yaml.Marshaler interface. +func (o SingleOrArray[T]) MarshalYAML() (any, error) { + var v any = []T(o) + if len(o) == 1 { + v = o[0] + } + return v, nil +} diff --git a/vendor/github.com/sv-tools/openapi/spec/tag.go b/vendor/github.com/sv-tools/openapi/spec/tag.go new file mode 100644 index 00000000..3a79d39c --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/tag.go @@ -0,0 +1,26 @@ +package spec + +// Tag adds metadata to a single tag that is used by the Operation Object. 
+// It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. +// +// https://spec.openapis.org/oas/v3.1.0#tag-object +// +// Example: +// +// name: pet +// description: Pets operations +type Tag struct { + // Additional external documentation for this tag. + ExternalDocs *Extendable[ExternalDocs] `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` + // REQUIRED. + // The name of the tag. + Name string `json:"name" yaml:"name"` + // A description for the tag. + // CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty" yaml:"description,omitempty"` +} + +// NewTag creates Tag object. +func NewTag() *Extendable[Tag] { + return NewExtendable(&Tag{}) +} diff --git a/vendor/github.com/sv-tools/openapi/spec/type_formats.go b/vendor/github.com/sv-tools/openapi/spec/type_formats.go new file mode 100644 index 00000000..5c367ae2 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/type_formats.go @@ -0,0 +1,94 @@ +package spec + +const ( + // ******* Built-in OpenAPI formats ******* + // + // https://spec.openapis.org/oas/v3.1.0#data-types + + Int32Format = "int32" + Int64Format = "int64" + FloatFormat = "float" + DoubleFormat = "double" + PasswordFormat = "password" + + // ******* Built-in JSON Schema formats ******* + // + // https://json-schema.org/understanding-json-schema/reference/string.html#built-in-formats + + // DateTimeFormat is date and time together, for example, 2018-11-13T20:20:39+00:00. + DateTimeFormat = "date-time" + // TimeFormat is time, for example, 20:20:39+00:00 + TimeFormat = "time" + // DateFormat is date, for example, 2018-11-13. + DateFormat = "date" + // DurationFormat is a duration as defined by the ISO 8601 ABNF for “duration”. + // For example, P3D expresses a duration of 3 days. 
+ // + // https://datatracker.ietf.org/doc/html/rfc3339#appendix-A + DurationFormat = "duration" + // EmailFormat is internet email address, see RFC 5321, section 4.1.2. + // + // https://tools.ietf.org/html/rfc5321#section-4.1.2 + EmailFormat = "email" + // IDNEmailFormat is the internationalized form of an Internet email address, see RFC 6531. + // + // https://tools.ietf.org/html/rfc6531 + IDNEmailFormat = "idn-email" + // HostnameFormat is internet host name, see RFC 1123, section 2.1. + // + // https://datatracker.ietf.org/doc/html/rfc1123#section-2.1 + HostnameFormat = "hostname" + // IDNHostnameFormat is an internationalized Internet host name, see RFC5890, section 2.3.2.3. + // + // https://tools.ietf.org/html/rfc6531 + IDNHostnameFormat = "idn-hostname" + // IPv4Format is IPv4 address, according to dotted-quad ABNF syntax as defined in RFC 2673, section 3.2. + // + // https://tools.ietf.org/html/rfc2673#section-3.2 + IPv4Format = "ipv4" + // IPv6Format is IPv6 address, as defined in RFC 2373, section 2.2. + // + // https://tools.ietf.org/html/rfc2373#section-2.2 + IPv6Format = "ipv6" + // UUIDFormat is a Universally Unique Identifier as defined by RFC 4122. + // Example: 3e4666bf-d5e5-4aa7-b8ce-cefe41c7568a + // + // RFC 4122 + UUIDFormat = "uuid" + // URIFormat is a universal resource identifier (URI), according to RFC3986. + // + // https://tools.ietf.org/html/rfc3986 + URIFormat = "uri" + // URIReferenceFormat is a URI Reference (either a URI or a relative-reference), according to RFC3986, section 4.1. + // + // https://tools.ietf.org/html/rfc3986#section-4.1 + URIReferenceFormat = "uri-reference" + // IRIFormat is the internationalized equivalent of a “uri”, according to RFC3987. 
+ // + // https://tools.ietf.org/html/rfc3987 + IRIFormat = "iri" + // IRIReferenceFormat is The internationalized equivalent of a “uri-reference”, according to RFC3987 + // + // https://tools.ietf.org/html/rfc3987 + IRIReferenceFormat = "iri-reference" + // URITemplateFormat is a URI Template (of any level) according to RFC6570. + // If you don’t already know what a URI Template is, you probably don’t need this value. + // + // https://tools.ietf.org/html/rfc6570 + URITemplateFormat = "uri-template" + // JsonPointerFormat is a JSON Pointer, according to RFC6901. + // There is more discussion on the use of JSON Pointer within JSON Schema in Structuring a complex schema. + // Note that this should be used only when the entire string contains only JSON Pointer content, e.g. /foo/bar. + // JSON Pointer URI fragments, e.g. #/foo/bar/ should use "uri-reference". + // + // https://tools.ietf.org/html/rfc6901 + JsonPointerFormat = "json-pointer" + // RelativeJsonPointerFormat is a relative JSON pointer. + // + // https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01 + RelativeJsonPointerFormat = "relative-json-pointer" + // RegexFormat is a regular expression, which should be valid according to the ECMA 262 dialect. + // + // https://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf + RegexFormat = "regex" +) diff --git a/vendor/github.com/sv-tools/openapi/spec/types.go b/vendor/github.com/sv-tools/openapi/spec/types.go new file mode 100644 index 00000000..1dd46127 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/types.go @@ -0,0 +1,57 @@ +package spec + +const ( + // ******* Type-specific keywords ******* + // + // https://json-schema.org/understanding-json-schema/reference/type.html + + // StringType is used for strings of text. It may contain Unicode characters. 
+ // + // https://json-schema.org/understanding-json-schema/reference/string.html#string + StringType = "string" + // NumberType is used for any numeric type, either integers or floating point numbers. + // + // https://json-schema.org/understanding-json-schema/reference/numeric.html#number + NumberType = "number" + // IntegerType is used for integral numbers. + // JSON does not have distinct types for integers and floating-point values. + // Therefore, the presence or absence of a decimal point is not enough to distinguish between integers and non-integers. + // For example, 1 and 1.0 are two ways to represent the same value in JSON. + // JSON Schema considers that value an integer no matter which representation was used. + // + // https://json-schema.org/understanding-json-schema/reference/numeric.html#integer + IntegerType = "integer" + // ObjectType is the mapping type in JSON. + // They map “keys” to “values”. + // In JSON, the “keys” must always be strings. + // Each of these pairs is conventionally referred to as a “property”. + // + // https://json-schema.org/understanding-json-schema/reference/object.html#object + ObjectType = "object" + // ArrayType is used for ordered elements. + // In JSON, each element in an array may be of a different type. + // + // https://json-schema.org/understanding-json-schema/reference/array.html#array + ArrayType = "array" + // BooleanType matches only two special values: true and false. + // Note that values that evaluate to true or false, such as 1 and 0, are not accepted by the schema. + // + // https://json-schema.org/understanding-json-schema/reference/boolean.html#boolean + BooleanType = "boolean" + // NullType has only one acceptable value: null. 
+ // + // https://json-schema.org/understanding-json-schema/reference/null.html#null + NullType = "null" + + // ******* Media: string-encoding non-JSON data ******* + // + // https://json-schema.org/understanding-json-schema/reference/non_json_data.html + + SevenBitEncoding = "7bit" + EightBitEncoding = "8bit" + BinaryEncoding = "binary" + QuotedPrintableEncoding = "quoted-printable" + Base16Encoding = "base16" + Base32Encoding = "base32" + Base64Encoding = "base64" +) diff --git a/vendor/github.com/sv-tools/openapi/spec/xml.go b/vendor/github.com/sv-tools/openapi/spec/xml.go new file mode 100644 index 00000000..88ee3959 --- /dev/null +++ b/vendor/github.com/sv-tools/openapi/spec/xml.go @@ -0,0 +1,52 @@ +package spec + +// XML is a metadata object that allows for more fine-tuned XML model definitions. +// When using arrays, XML element names are not inferred (for singular/plural forms) and the name property SHOULD +// be used to add that information. +// See examples for expected behavior. +// +// https://spec.openapis.org/oas/v3.1.0#xml-object +// +// Example: +// +// Person: +// type: object +// properties: +// id: +// type: integer +// format: int32 +// xml: +// attribute: true +// name: +// type: string +// xml: +// namespace: https://example.com/schema/sample +// prefix: sample +// +// +// example +// +type XML struct { + // Replaces the name of the element/attribute used for the described schema property. + // When defined within items, it will affect the name of the individual XML elements within the list. + // When defined alongside type being array (outside the items), it will affect the wrapping element and only if wrapped is true. + // If wrapped is false, it will be ignored. + Name string `json:"name,omitempty" yaml:"name,omitempty"` + // The URI of the namespace definition. + // This MUST be in the form of an absolute URI. + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + // The prefix to be used for the name. 
+ Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"` + // Declares whether the property definition translates to an attribute instead of an element. Default value is false. + Attribute bool `json:"attribute,omitempty" yaml:"attribute,omitempty"` + // MAY be used only for an array definition. + // Signifies whether the array is wrapped (for example, ) or unwrapped (). + // Default value is false. + // The definition takes effect only when defined alongside type being array (outside the items). + Wrapped bool `json:"wrapped,omitempty" yaml:"wrapped,omitempty"` +} + +// NewXML creates XML object. +func NewXML() *Extendable[XML] { + return NewExtendable(&XML{}) +} diff --git a/vendor/github.com/swaggo/echo-swagger/README.md b/vendor/github.com/swaggo/echo-swagger/README.md index d645ba6c..f7bd4b5c 100644 --- a/vendor/github.com/swaggo/echo-swagger/README.md +++ b/vendor/github.com/swaggo/echo-swagger/README.md @@ -7,34 +7,42 @@ echo middleware to automatically generate RESTful API documentation with Swagger [![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/echo-swagger)](https://goreportcard.com/report/github.com/swaggo/echo-swagger) [![Release](https://img.shields.io/github/release/swaggo/echo-swagger.svg?style=flat-square)](https://github.com/swaggo/echo-swagger/releases) - ## Usage ### Start using it + 1. Add comments to your API source code, [See Declarative Comments Format](https://github.com/swaggo/swag#declarative-comments-format). 2. Download [Swag](https://github.com/swaggo/swag) for Go by using: + ```sh $ go get -d github.com/swaggo/swag/cmd/swag -# 1.16 or newer +# 1.21 or newer $ go install github.com/swaggo/swag/cmd/swag@latest ``` + 3. Run the [Swag](https://github.com/swaggo/swag) in your Go project root folder which contains `main.go` file, [Swag](https://github.com/swaggo/swag) will parse comments and generate required files(`docs` folder and `docs/doc.go`). 
+ ```sh_ "github.com/swaggo/echo-swagger/v2/example/docs" $ swag init ``` + 4. Download [echo-swagger](https://github.com/swaggo/echo-swagger) by using: + ```sh $ go get -u github.com/swaggo/echo-swagger ``` And import following in your code: + ```go import "github.com/swaggo/echo-swagger" // echo-swagger middleware ``` ### Canonical example: +## OpenAPI Specification (OAS) 2.0 + ```go package main @@ -66,7 +74,41 @@ func main() { e.Logger.Fatal(e.Start(":1323")) } +``` +## OpenAPI Specification (OAS) 3.0 + +```go +package main + +import ( + "github.com/labstack/echo/v4" + "github.com/swaggo/echo-swagger" + + _ "github.com/swaggo/echo-swagger/example/docs" // docs is generated by Swag CLI, you have to import it. +) + +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server Petstore server. +// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host petstore.swagger.io +// @BasePath /v2 +func main() { + e := echo.New() + + e.GET("/swagger/*", echoSwagger.WrapHandlerV3) + + e.Logger.Fatal(e.Start(":1323")) +} ``` 5. Run it, and browser to http://localhost:1323/swagger/index.html, you can see Swagger 2.0 Api documents. diff --git a/vendor/github.com/swaggo/echo-swagger/swagger.go b/vendor/github.com/swaggo/echo-swagger/swagger.go index b8dc937d..352ffe6d 100644 --- a/vendor/github.com/swaggo/echo-swagger/swagger.go +++ b/vendor/github.com/swaggo/echo-swagger/swagger.go @@ -1,15 +1,19 @@ package echoSwagger import ( + "errors" "html/template" + "io" "net/http" + "os" "path/filepath" "regexp" - "github.com/ghodss/yaml" "github.com/labstack/echo/v4" swaggerFiles "github.com/swaggo/files/v2" "github.com/swaggo/swag" + swagV2 "github.com/swaggo/swag/v2" + "sigs.k8s.io/yaml" ) // Config stores echoSwagger configuration variables. 
@@ -119,7 +123,10 @@ func newConfig(configFns ...func(*Config)) *Config { } // WrapHandler wraps swaggerFiles.Handler and returns echo.HandlerFunc -var WrapHandler = EchoWrapHandler() +var ( + WrapHandler = EchoWrapHandler() + WrapHandlerV3 = EchoWrapHandlerV3() +) // EchoWrapHandler wraps `http.Handler` into `echo.HandlerFunc`. func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc { @@ -130,6 +137,80 @@ func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc { var re = regexp.MustCompile(`^(.*/)([^?].*)?[?|.]*$`) + return func(c echo.Context) error { + if c.Request().Method != http.MethodGet { + return c.String(http.StatusMethodNotAllowed, http.StatusText(http.StatusMethodNotAllowed)) + } + + matches := re.FindStringSubmatch(c.Request().RequestURI) + path := matches[2] + + switch filepath.Ext(path) { + case ".html": + c.Response().Header().Set("Content-Type", "text/html; charset=utf-8") + case ".css": + c.Response().Header().Set("Content-Type", "text/css; charset=utf-8") + case ".js": + c.Response().Header().Set("Content-Type", "application/javascript") + case ".json": + c.Response().Header().Set("Content-Type", "application/json; charset=utf-8") + case ".yaml": + c.Response().Header().Set("Content-Type", "text/plain; charset=utf-8") + case ".png": + c.Response().Header().Set("Content-Type", "image/png") + } + + switch path { + case "": + return c.Redirect(http.StatusMovedPermanently, matches[1]+"/"+"index.html") + case "index.html": + pr, pw := io.Pipe() + go func() { + defer pw.Close() + _ = index.Execute(pw, config) + }() + return c.Stream(http.StatusOK, "text/html; charset=utf-8", pr) + case "doc.json": + doc, err := swag.ReadDoc(config.InstanceName) + if err != nil { + return c.String(http.StatusInternalServerError, err.Error()) + } + return c.String(http.StatusOK, doc) + case "doc.yaml": + jsonString, err := swag.ReadDoc(config.InstanceName) + if err != nil { + return c.String(http.StatusInternalServerError, err.Error()) + } + doc, err := 
yaml.JSONToYAML([]byte(jsonString)) + if err != nil { + return c.String(http.StatusInternalServerError, err.Error()) + } + return c.String(http.StatusOK, string(doc)) + } + c.Request().URL.Path = matches[2] + + f, err := swaggerFiles.FS.Open(matches[2]) + if errors.Is(err, os.ErrNotExist) { + // If the file is not found, return 404 + return c.String(http.StatusNotFound, http.StatusText(http.StatusNotFound)) + } else if err != nil { + return c.String(http.StatusNotFound, err.Error()) + } + defer f.Close() + + return c.Stream(http.StatusOK, c.Response().Header().Get("Content-Type"), f) + } +} + +// EchoWrapHandler wraps `http.Handler` into `echo.HandlerFunc`. +func EchoWrapHandlerV3(options ...func(*Config)) echo.HandlerFunc { + config := newConfig(options...) + + // create a template with name + index, _ := template.New("swagger_index.html").Parse(indexTemplate) + + var re = regexp.MustCompile(`^(.*/)([^?].*)?[?|.]*$`) + return func(c echo.Context) error { if c.Request().Method != http.MethodGet { return echo.NewHTTPError(http.StatusMethodNotAllowed, http.StatusText(http.StatusMethodNotAllowed)) @@ -165,7 +246,7 @@ func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc { case "index.html": _ = index.Execute(c.Response().Writer, config) case "doc.json": - doc, err := swag.ReadDoc(config.InstanceName) + doc, err := swagV2.ReadDoc(config.InstanceName) if err != nil { c.Error(err) @@ -174,7 +255,7 @@ func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc { _, _ = c.Response().Writer.Write([]byte(doc)) case "doc.yaml": - jsonString, err := swag.ReadDoc(config.InstanceName) + jsonString, err := swagV2.ReadDoc(config.InstanceName) if err != nil { c.Error(err) diff --git a/vendor/github.com/swaggo/swag/Dockerfile b/vendor/github.com/swaggo/swag/Dockerfile index 410fe31e..5ea91343 100644 --- a/vendor/github.com/swaggo/swag/Dockerfile +++ b/vendor/github.com/swaggo/swag/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile References: 
https://docs.docker.com/engine/reference/builder/ # Start from the latest golang base image -FROM golang:1.20-alpine as builder +FROM --platform=$BUILDPLATFORM golang:1.21-alpine as builder # Set the Current Working Directory inside the container WORKDIR /app @@ -15,12 +15,18 @@ RUN go mod download # Copy the source from the current directory to the Working Directory inside the container COPY . . +# Configure go compiler target platform +ARG TARGETOS +ARG TARGETARCH +ENV GOARCH=$TARGETARCH \ + GOOS=$TARGETOS + # Build the Go app RUN CGO_ENABLED=0 GOOS=linux go build -v -a -installsuffix cgo -o swag cmd/swag/main.go ######## Start a new stage from scratch ####### -FROM scratch +FROM --platform=$TARGETPLATFORM scratch WORKDIR /code/ diff --git a/vendor/github.com/swaggo/swag/Makefile b/vendor/github.com/swaggo/swag/Makefile index 0d8175da..85dc3623 100644 --- a/vendor/github.com/swaggo/swag/Makefile +++ b/vendor/github.com/swaggo/swag/Makefile @@ -17,8 +17,6 @@ BINARY_NAME:=swag PACKAGES:=$(shell $(GOLIST) github.com/swaggo/swag github.com/swaggo/swag/cmd/swag github.com/swaggo/swag/gen github.com/swaggo/swag/format) GOFILES:=$(shell find . -name "*.go" -type f) -export GO111MODULE := on - all: test build .PHONY: build @@ -57,17 +55,8 @@ clean: deps: $(GOMODTIDY) -.PHONY: devel-deps -devel-deps: - GO111MODULE=off $(GOGET) -v -u \ - golang.org/x/lint/golint - -.PHONY: lint -lint: devel-deps - for PKG in $(PACKAGES); do golint -set_exit_status $$PKG || exit 1; done; - .PHONY: vet -vet: deps devel-deps +vet: deps $(GOVET) $(PACKAGES) .PHONY: fmt diff --git a/vendor/github.com/swaggo/swag/README.md b/vendor/github.com/swaggo/swag/README.md index 054e36ec..0d485c33 100644 --- a/vendor/github.com/swaggo/swag/README.md +++ b/vendor/github.com/swaggo/swag/README.md @@ -31,7 +31,8 @@ Swag converts Go annotations to Swagger Documentation 2.0. 
We've created a varie - [User defined structure with an array type](#user-defined-structure-with-an-array-type) - [Function scoped struct declaration](#function-scoped-struct-declaration) - [Model composition in response](#model-composition-in-response) - - [Add a headers in response](#add-a-headers-in-response) + - [Add request headers](#add-request-headers) + - [Add response headers](#add-response-headers) - [Use multiple path params](#use-multiple-path-params) - [Example value of struct](#example-value-of-struct) - [SchemaExample of body](#schemaexample-of-body) @@ -55,7 +56,7 @@ Swag converts Go annotations to Swagger Documentation 2.0. We've created a varie ```sh go install github.com/swaggo/swag/cmd/swag@latest ``` -To build from source you need [Go](https://golang.org/dl/) (1.18 or newer). +To build from source you need [Go](https://golang.org/dl/) (1.19 or newer). Alternatively you can run the docker image: ```sh @@ -103,6 +104,7 @@ OPTIONS: --outputTypes value, --ot value Output types of generated files (docs.go, swagger.json, swagger.yaml) like go,json,yaml (default: "go,json,yaml") --parseVendor Parse go files in 'vendor' folder, disabled by default (default: false) --parseDependency, --pd Parse go files inside dependency folder, disabled by default (default: false) + --parseDependencyLevel, --pdl Enhancement of '--parseDependency', parse go files inside dependency folder, 0 disabled, 1 only parse models, 2 only parse operations, 3 parse all (default: 0) --markdownFiles value, --md value Parse folder containing markdown files to use as description, disabled by default --codeExampleFiles value, --cef value Parse folder containing code example files to use for the x-codeSamples extension, disabled by default --parseInternal Parse go files in internal packages, disabled by default (default: false) @@ -113,9 +115,10 @@ OPTIONS: --overridesFile value File to read global type overrides from. 
(default: ".swaggo") --parseGoList Parse dependency via 'go list' (default: true) --tags value, -t value A comma-separated list of tags to filter the APIs for which the documentation is generated.Special case if the tag is prefixed with the '!' character then the APIs with that tag will be excluded - --templateDelims value, --td value Provide custom delimeters for Go template generation. The format is leftDelim,rightDelim. For example: "[[,]]" + --templateDelims value, --td value Provide custom delimiters for Go template generation. The format is leftDelim,rightDelim. For example: "[[,]]" --collectionFormat value, --cf value Set default collection format (default: "csv") --state value Initial state for the state machine (default: ""), @HostState in root file, @State in other files + --parseFuncBody Parse API info within body of functions in go files, disabled by default (default: false) --help, -h show help (default: false) ``` @@ -417,6 +420,7 @@ When a short string in your documentation is insufficient, or you need images, c | description.markdown | A short description of the application. Parsed from the api.md file. This is an alternative to @description |// @description.markdown No value needed, this parses the description from api.md | | tag.name | Name of a tag.| // @tag.name This is the name of the tag | | tag.description.markdown | Description of the tag this is an alternative to tag.description. The description will be read from a file named like tagname.md | // @tag.description.markdown | +| tag.x-name | The extension key, must be start by x- and take only string value | // @x-example-key value | ## API Operation @@ -467,6 +471,7 @@ Besides that, `swag` also accepts aliases for some MIME Types as follows: | png | image/png | | jpeg | image/jpeg | | gif | image/gif | +| event-stream | text/event-stream | @@ -536,6 +541,7 @@ type Foo struct { Field Name | Type | Description ---|:---:|--- validate | `string` | Determines the validation for the parameter. 
Possible values are: `required,optional`. +json | `string` | JSON tag options. The `omitempty` option will mark the field as not required. default | * | Declares the value of the parameter that the server will use if none is provided, for example a "count" to control the number of results per page might default to 100 if not supplied by the client in the request. (Note: "default" has no meaning for required parameters.) See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Unlike JSON Schema this value MUST conform to the defined [`type`](#parameterType) for this parameter. maximum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2. minimum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3. @@ -645,7 +651,14 @@ type DeepObject struct { //in `proto` package } @success 200 {object} jsonresult.JSONResult{data1=proto.Order{data=proto.DeepObject},data2=[]proto.Order{data=[]proto.DeepObject}} "desc" ``` -### Add a headers in response +### Add request headers + +```go +// @Param X-MyHeader header string true "MyHeader must be set for valid response" +// @Param X-API-VERSION header string true "API version eg.: 1.0" +``` + +### Add response headers ```go // @Success 200 {string} string "ok" @@ -886,18 +899,48 @@ Each API operation. // @Security ApiKeyAuth ``` -Make it AND condition +Make it OR condition ```go // @Security ApiKeyAuth // @Security OAuth2Application[write, admin] ``` -Make it OR condition +Make it AND condition ```go -// @Security ApiKeyAuth || firebase -// @Security OAuth2Application[write, admin] || APIKeyAuth +// @Security ApiKeyAuth && firebase +// @Security OAuth2Application[write, admin] && APIKeyAuth +``` + +### Generate enum types from enum constants + +You can generate enums from ordered constants. Each enum variant can have a comment, an override name, or both. This works with both iota-defined and manually defined constants. 
+ +```go +type Difficulty string + +const ( + Easy Difficulty = "easy" // You can add a comment to the enum variant. + Medium Difficulty = "medium" // @name MediumDifficulty + Hard Difficulty = "hard" // @name HardDifficulty You can have a name override and a comment. +) + +type Class int + +const ( + First Class = iota // @name FirstClass + Second // Name override and comment rules apply here just as above. + Third // @name ThirdClass This one has a name override and a comment. +) + +// There is no need to add `enums:"..."` to the fields, it is automatically generated from the ordered consts. +type Quiz struct { + Difficulty Difficulty + Class Class + Questions []string + Answers []string +} ``` @@ -931,7 +974,7 @@ func GetPosts(w http.ResponseWriter, r *http.Request) { _ = web.GenericNestedResponse[types.Post]{} } ``` -See [this file](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) for more details +See [this file](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) for more details and other examples. ### Change the default Go Template action delimiters @@ -946,6 +989,17 @@ swag init -g http/api.go -td "[[,]]" ``` The new delimiter is a string with the format "``,``". +### Parse Internal and Dependency Packages + +If the struct is defined in a dependency package, use `--parseDependency`. + +If the struct is defined in your main project, use `--parseInternal`. + +if you want to include both internal and from dependencies use both flags +``` +swag init --parseDependency --parseInternal +``` + ## About the Project This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) but we simplified the usage and added support a variety of [web frameworks](#supported-web-frameworks). Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers). It has licenses [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en). 
## Contributors diff --git a/vendor/github.com/swaggo/swag/README_pt.md b/vendor/github.com/swaggo/swag/README_pt.md index 7f95066b..f3d8708e 100644 --- a/vendor/github.com/swaggo/swag/README_pt.md +++ b/vendor/github.com/swaggo/swag/README_pt.md @@ -54,7 +54,7 @@ Swag converte anotações Go para Documentação Swagger 2.0. Criámos uma varie ```sh go install github.com/swaggo/swag/cmd/swag@latest ``` -Para construir a partir da fonte é necessário [Go](https://golang.org/dl/) (1.18 ou mais recente). +Para construir a partir da fonte é necessário [Go](https://golang.org/dl/) (1.19 ou mais recente). Ou descarregar um binário pré-compilado a partir da [página de lançamento](https://github.com/swaggo/swag/releases). @@ -445,6 +445,7 @@ Além disso, `swag` também aceita pseudónimos para alguns tipos de MIME, como | png | image/png | | jpeg | image/jpeg | | gif | image/gif | +| event-stream | text/event-stream | @@ -870,18 +871,18 @@ Cada operação API. // @Security ApiKeyAuth ``` -Faça-o AND condicione-o +Faça-o OR condicione-o ```go // @Security ApiKeyAuth // @Security OAuth2Application[write, admin] ``` -Faça-o OR condição +Faça-o AND condição ```go -// @Security ApiKeyAuth || firebase -// @Security OAuth2Application[write, admin] || APIKeyAuth +// @Security ApiKeyAuth && firebase +// @Security OAuth2Application[write, admin] && APIKeyAuth ``` diff --git a/vendor/github.com/swaggo/swag/README_zh-CN.md b/vendor/github.com/swaggo/swag/README_zh-CN.md index 87d600b5..0f831d5c 100644 --- a/vendor/github.com/swaggo/swag/README_zh-CN.md +++ b/vendor/github.com/swaggo/swag/README_zh-CN.md @@ -9,7 +9,7 @@ [![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag) [![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master) [![Go Doc](https://godoc.org/github.com/swaggo/swagg?status.svg)](https://godoc.org/github.com/swaggo/swag) 
-[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers) +[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield) [![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases) @@ -50,7 +50,7 @@ Swag将Go的注释转换为Swagger2.0文档。我们为流行的 [Go Web Framewo go install github.com/swaggo/swag/cmd/swag@latest ``` -从源码开始构建的话,需要有Go环境(1.18及以上版本)。 +从源码开始构建的话,需要有Go环境(1.19及以上版本)。 或者从github的release页面下载预编译好的二进制文件。 @@ -90,6 +90,7 @@ OPTIONS: --output value, -o value 文件(swagger.json, swagger.yaml and doc.go)输出目录 (默认: "./docs") --parseVendor 是否解析vendor目录里的go源文件,默认不 --parseDependency 是否解析依赖目录中的go源文件,默认不 + --parseDependencyLevel, --pdl 对'--parseDependency'参数进行增强, 是否解析依赖目录中的go源文件, 0 不解析, 1 只解析对象模型, 2 只解析API, 3 对象模型和API都解析 (default: 0) --markdownFiles value, --md value 指定API的描述信息所使用的markdown文件所在的目录 --generatedTime 是否输出时间到输出文件docs.go的顶部,默认是 --codeExampleFiles value, --cef value 解析包含用于 x-codeSamples 扩展的代码示例文件的文件夹,默认禁用 @@ -298,7 +299,7 @@ swag init ## 格式化说明 -可以针对Swag的注释自动格式化,就像`go fmt`。 +可以针对Swag的注释自动格式化,就像`go fmt`。 此处查看格式化结果 [here](https://github.com/swaggo/swag/tree/master/example/celler). 
示例: @@ -416,6 +417,7 @@ Example [celler/controller](https://github.com/swaggo/swag/tree/master/example/c | png | image/png | | jpeg | image/jpeg | | gif | image/gif | +| event-stream | text/event-stream | ## 参数类型 @@ -731,8 +733,7 @@ type Resp struct { 使用AND条件。 ```go -// @Security ApiKeyAuth -// @Security OAuth2Application[write, admin] +// @Security ApiKeyAuth && OAuth2Application[write, admin] ``` ## 项目相关 diff --git a/vendor/github.com/swaggo/swag/const.go b/vendor/github.com/swaggo/swag/const.go index 83755103..a23d8bd7 100644 --- a/vendor/github.com/swaggo/swag/const.go +++ b/vendor/github.com/swaggo/swag/const.go @@ -19,6 +19,19 @@ type ConstVariable struct { Pkg *PackageDefinitions } +// VariableName gets the name for this const variable, taking into account comment overrides. +func (cv *ConstVariable) VariableName() string { + if ignoreNameOverride(cv.Name.Name) { + return cv.Name.Name[1:] + } + + if overriddenName := nameOverride(cv.Comment); overriddenName != "" { + return overriddenName + } + + return cv.Name.Name +} + var escapedChars = map[uint8]uint8{ 'n': '\n', 'r': '\r', diff --git a/vendor/github.com/swaggo/swag/enums.go b/vendor/github.com/swaggo/swag/enums.go index 38658f20..300787bc 100644 --- a/vendor/github.com/swaggo/swag/enums.go +++ b/vendor/github.com/swaggo/swag/enums.go @@ -1,8 +1,9 @@ package swag const ( - enumVarNamesExtension = "x-enum-varnames" - enumCommentsExtension = "x-enum-comments" + enumVarNamesExtension = "x-enum-varnames" + enumCommentsExtension = "x-enum-comments" + enumDescriptionsExtension = "x-enum-descriptions" ) // EnumValue a model to record an enum consts variable diff --git a/vendor/github.com/swaggo/swag/field_parser.go b/vendor/github.com/swaggo/swag/field_parser.go index 9b24e787..4f1aac09 100644 --- a/vendor/github.com/swaggo/swag/field_parser.go +++ b/vendor/github.com/swaggo/swag/field_parser.go @@ -18,6 +18,7 @@ var _ FieldParser = &tagBaseFieldParser{p: nil, field: nil, tag: ""} const ( requiredLabel = 
"required" optionalLabel = "optional" + omitEmptyLabel = "omitempty" swaggerTypeTag = "swaggertype" swaggerIgnoreTag = "swaggerignore" ) @@ -65,38 +66,41 @@ func (ps *tagBaseFieldParser) ShouldSkip() bool { return false } -func (ps *tagBaseFieldParser) FieldName() (string, error) { - var name string +func (ps *tagBaseFieldParser) FieldNames() ([]string, error) { + if len(ps.field.Names) <= 1 { + // if embedded but with a json/form name ?? + if ps.field.Tag != nil { + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name != "" { + return []string{name}, nil + } - if ps.field.Tag != nil { - // json:"tag,hoge" - name = strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) - if name != "" { - return name, nil + // use "form" tag over json tag + name = ps.FormName() + if name != "" { + return []string{name}, nil + } } - - // use "form" tag over json tag - name = ps.FormName() - if name != "" { - return name, nil + if len(ps.field.Names) == 0 { + return nil, nil } } - - if ps.field.Names == nil { - return "", nil - } - - switch ps.p.PropNamingStrategy { - case SnakeCase: - return toSnakeCase(ps.field.Names[0].Name), nil - case PascalCase: - return ps.field.Names[0].Name, nil - default: - return toLowerCamelCase(ps.field.Names[0].Name), nil + var names = make([]string, 0, len(ps.field.Names)) + for _, name := range ps.field.Names { + switch ps.p.PropNamingStrategy { + case SnakeCase: + names = append(names, toSnakeCase(name.Name)) + case PascalCase: + names = append(names, name.Name) + default: + names = append(names, toLowerCamelCase(name.Name)) + } } + return names, nil } -func (ps *tagBaseFieldParser) firstTagValue(tag string) string { +func (ps *tagBaseFieldParser) FirstTagValue(tag string) string { if ps.field.Tag != nil { return strings.TrimRight(strings.TrimSpace(strings.Split(ps.tag.Get(tag), ",")[0]), "[]") } @@ -104,15 +108,15 @@ func (ps *tagBaseFieldParser) firstTagValue(tag string) string { } func (ps 
*tagBaseFieldParser) FormName() string { - return ps.firstTagValue(formTag) + return ps.FirstTagValue(formTag) } func (ps *tagBaseFieldParser) HeaderName() string { - return ps.firstTagValue(headerTag) + return ps.FirstTagValue(headerTag) } func (ps *tagBaseFieldParser) PathName() string { - return ps.firstTagValue(uriTag) + return ps.FirstTagValue(uriTag) } func toSnakeCase(in string) string { @@ -169,6 +173,7 @@ func (ps *tagBaseFieldParser) CustomSchema() (*spec.Schema, error) { } type structField struct { + title string schemaType string arrayType string formatType string @@ -274,6 +279,7 @@ func (ps *tagBaseFieldParser) complementSchema(schema *spec.Schema, types []stri field := &structField{ schemaType: types[0], formatType: ps.tag.Get(formatTag), + title: ps.tag.Get(titleTag), } if len(types) > 1 && (types[0] == ARRAY || types[0] == OBJECT) { @@ -414,6 +420,7 @@ func (ps *tagBaseFieldParser) complementSchema(schema *spec.Schema, types []stri if field.schemaType != ARRAY { schema.Format = field.formatType } + schema.Title = field.title extensionsTagValue := ps.tag.Get(extensionsTag) if extensionsTagValue != "" { @@ -527,6 +534,15 @@ func (ps *tagBaseFieldParser) IsRequired() (bool, error) { } } + jsonTag := ps.tag.Get(jsonTag) + if jsonTag != "" { + for _, val := range strings.Split(jsonTag, ",") { + if val == omitEmptyLabel { + return false, nil + } + } + } + return ps.p.RequiredByDefault, nil } diff --git a/vendor/github.com/swaggo/swag/formatter.go b/vendor/github.com/swaggo/swag/formatter.go index 511e3a82..1074a3bf 100644 --- a/vendor/github.com/swaggo/swag/formatter.go +++ b/vendor/github.com/swaggo/swag/formatter.go @@ -12,6 +12,8 @@ import ( "sort" "strings" "text/tabwriter" + + "golang.org/x/tools/imports" ) // Check of @Param @Success @Failure @Response @Header @@ -68,8 +70,11 @@ func (f *Formatter) Format(fileName string, contents []byte) ([]byte, error) { for _, comment := range ast.Comments { formatFuncDoc(fileSet, comment.List, &edits) } - - 
return edits.apply(contents), nil + formatted, err := imports.Process(fileName, edits.apply(contents), nil) + if err != nil { + return nil, err + } + return formatted, nil } type edit struct { diff --git a/vendor/github.com/swaggo/swag/generics.go b/vendor/github.com/swaggo/swag/generics.go index 07344bba..80e93a90 100644 --- a/vendor/github.com/swaggo/swag/generics.go +++ b/vendor/github.com/swaggo/swag/generics.go @@ -61,6 +61,7 @@ func (pkgDefs *PackagesDefinitions) getTypeFromGenericParam(genericParam string, Enums: typeSpecDef.Enums, PkgPath: typeSpecDef.PkgPath, ParentSpec: typeSpecDef.ParentSpec, + SchemaName: "array_" + typeSpecDef.SchemaName, NotUnique: false, } } @@ -96,9 +97,9 @@ func (pkgDefs *PackagesDefinitions) getTypeFromGenericParam(genericParam string, Enums: typeSpecDef.Enums, PkgPath: typeSpecDef.PkgPath, ParentSpec: typeSpecDef.ParentSpec, + SchemaName: "map_" + parts[0] + "_" + typeSpecDef.SchemaName, NotUnique: false, } - } if IsGolangPrimitiveType(genericParam) { return &TypeSpecDef{ @@ -106,6 +107,7 @@ func (pkgDefs *PackagesDefinitions) getTypeFromGenericParam(genericParam string, Name: ast.NewIdent(genericParam), Type: ast.NewIdent(genericParam), }, + SchemaName: genericParam, } } return pkgDefs.FindTypeSpec(genericParam, file) @@ -155,14 +157,27 @@ func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, origi } name = fmt.Sprintf("%s%s-", string(IgnoreNameOverridePrefix), original.TypeName()) + schemaName := fmt.Sprintf("%s-", original.SchemaName) + var nameParts []string + var schemaNameParts []string + for _, def := range formals { if specDef, ok := genericParamTypeDefs[def.Name]; ok { - nameParts = append(nameParts, specDef.TypeName()) + nameParts = append(nameParts, specDef.Name) + + schemaNamePart := specDef.Name + + if specDef.TypeSpec != nil { + schemaNamePart = specDef.TypeSpec.SchemaName + } + + schemaNameParts = append(schemaNameParts, schemaNamePart) } } name += 
normalizeGenericTypeName(strings.Join(nameParts, "-")) + schemaName += normalizeGenericTypeName(strings.Join(schemaNameParts, "-")) if typeSpec, ok := pkgDefs.uniqueDefinitions[name]; ok { return typeSpec @@ -180,6 +195,7 @@ func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, origi Doc: original.TypeSpec.Doc, Assign: original.TypeSpec.Assign, }, + SchemaName: schemaName, } pkgDefs.uniqueDefinitions[name] = parametrizedTypeSpec diff --git a/vendor/github.com/swaggo/swag/golist.go b/vendor/github.com/swaggo/swag/golist.go index fa0b2cd9..bce3234f 100644 --- a/vendor/github.com/swaggo/swag/golist.go +++ b/vendor/github.com/swaggo/swag/golist.go @@ -22,7 +22,7 @@ func listPackages(ctx context.Context, dir string, env []string, args ...string) var stderrBuf bytes.Buffer cmd.Stderr = &stderrBuf defer func() { - if stderrBuf.Len() > 0 { + if (finalErr != nil) && (stderrBuf.Len() > 0) { finalErr = fmt.Errorf("%v\n%s", finalErr, stderrBuf.Bytes()) } }() diff --git a/vendor/github.com/swaggo/swag/operation.go b/vendor/github.com/swaggo/swag/operation.go index 169510ff..f3b77614 100644 --- a/vendor/github.com/swaggo/swag/operation.go +++ b/vendor/github.com/swaggo/swag/operation.go @@ -47,9 +47,11 @@ var mimeTypeAliases = map[string]string{ "png": "image/png", "jpeg": "image/jpeg", "gif": "image/gif", + "event-stream": "text/event-stream", } var mimeTypePattern = regexp.MustCompile("^[^/]+/[^/]+$") +var securityPairSepPattern = regexp.MustCompile(`\|\||&&`) // || for compatibility with old version, && for clarity // NewOperation creates a new Operation with default properties. // map[int]Response. @@ -202,7 +204,7 @@ func (operation *Operation) ParseDescriptionComment(lineRemainder string) { return } - operation.Description += "\n" + lineRemainder + operation.Description = AppendDescription(operation.Description, lineRemainder) } // ParseMetadata parse metadata. 
@@ -253,7 +255,7 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F name := matches[1] paramType := matches[2] - refType := TransToValidSchemeType(matches[3]) + refType, format := TransToValidSchemeTypeWithFormat(matches[3]) // Detect refType objectType := OBJECT @@ -261,7 +263,7 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F if strings.HasPrefix(refType, "[]") { objectType = ARRAY refType = strings.TrimPrefix(refType, "[]") - refType = TransToValidSchemeType(refType) + refType, format = TransToValidSchemeTypeWithFormat(refType) } else if IsPrimitiveType(refType) || paramType == "formData" && refType == "file" { objectType = PRIMITIVE @@ -274,16 +276,16 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F if objectType == OBJECT { objectType = PRIMITIVE } - refType = TransToValidSchemeType(schema.Type[0]) + refType, format = TransToValidSchemeTypeWithFormat(schema.Type[0]) enums = schema.Enum } } requiredText := strings.ToLower(matches[4]) required := requiredText == "true" || requiredText == requiredLabel - description := matches[5] + description := strings.Join(strings.Split(matches[5], "\\n"), "\n") - param := createParameter(paramType, description, name, objectType, refType, required, enums, operation.parser.collectionFormatInQuery) + param := createParameter(paramType, description, name, objectType, refType, format, required, enums, operation.parser.collectionFormatInQuery) switch paramType { case "path", "header", "query", "formData": @@ -321,8 +323,8 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F nameOverrideType = "formData" } // load overridden type specific name from extensions if exists - if nameVal, ok := item.Schema.Extensions[nameOverrideType]; ok { - name = nameVal.(string) + if nameVal, ok := item.Schema.Extensions.GetString(nameOverrideType); ok { + name = nameVal } switch { @@ -334,16 +336,23 @@ func (operation 
*Operation) ParseParamComment(commentLine string, astFile *ast.F if len(itemSchema.Type) == 0 { itemSchema = operation.parser.getUnderlyingSchema(prop.Items.Schema) } + if itemSchema == nil { + continue + } if len(itemSchema.Type) == 0 { continue } if !IsSimplePrimitiveType(itemSchema.Type[0]) { continue } - param = createParameter(paramType, prop.Description, name, prop.Type[0], itemSchema.Type[0], findInSlice(schema.Required, item.Name), itemSchema.Enum, operation.parser.collectionFormatInQuery) + collectionFormat := operation.parser.collectionFormatInQuery + if cfv, ok := prop.Extensions.GetString(collectionFormatTag); ok { + collectionFormat = cfv + } + param = createParameter(paramType, prop.Description, name, prop.Type[0], itemSchema.Type[0], format, findInSlice(schema.Required, item.Name), itemSchema.Enum, collectionFormat) case IsSimplePrimitiveType(prop.Type[0]): - param = createParameter(paramType, prop.Description, name, PRIMITIVE, prop.Type[0], findInSlice(schema.Required, item.Name), nil, operation.parser.collectionFormatInQuery) + param = createParameter(paramType, prop.Description, name, PRIMITIVE, prop.Type[0], format, findInSlice(schema.Required, item.Name), nil, operation.parser.collectionFormatInQuery) default: operation.parser.debug.Printf("skip field [%s] in %s is not supported type for %s", name, refType, paramType) continue @@ -383,7 +392,7 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F param.Schema = schema } default: - return fmt.Errorf("%s is not supported paramType", paramType) + return fmt.Errorf("not supported paramType: %s", paramType) } err := operation.parseParamAttribute(commentLine, objectType, refType, paramType, ¶m) @@ -408,6 +417,7 @@ const ( exampleTag = "example" schemaExampleTag = "schemaExample" formatTag = "format" + titleTag = "title" validateTag = "validate" minimumTag = "minimum" maximumTag = "maximum" @@ -703,7 +713,7 @@ func parseMimeTypeList(mimeTypeList string, typeList *[]string, 
format string) e return nil } -var routerPattern = regexp.MustCompile(`^(/[\w./\-{}+:$]*)[[:blank:]]+\[(\w+)]`) +var routerPattern = regexp.MustCompile(`^(/[\w./\-{}\(\)+:$~]*)[[:blank:]]+\[(\w+)]`) // ParseRouterComment parses comment for given `router` comment string. func (operation *Operation) ParseRouterComment(commentLine string, deprecated bool) error { @@ -739,7 +749,7 @@ func (operation *Operation) ParseSecurityComment(commentLine string) error { securitySource = commentLine[strings.Index(commentLine, "@Security")+1:] ) - for _, securityOption := range strings.Split(securitySource, "||") { + for _, securityOption := range securityPairSepPattern.Split(securitySource, -1) { securityOption = strings.TrimSpace(securityOption) left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") @@ -837,13 +847,11 @@ func parseObjectSchema(parser *Parser, refType string, astFile *ast.File) (*spec case refType == NIL: return nil, nil case refType == INTERFACE: - return PrimitiveSchema(OBJECT), nil + return &spec.Schema{}, nil case refType == ANY: - return PrimitiveSchema(OBJECT), nil + return &spec.Schema{}, nil case IsGolangPrimitiveType(refType): - refType = TransToValidSchemeType(refType) - - return PrimitiveSchema(refType), nil + return TransToValidPrimitiveSchema(refType), nil case IsPrimitiveType(refType): return PrimitiveSchema(refType), nil case strings.HasPrefix(refType, "[]"): @@ -1178,7 +1186,7 @@ func (operation *Operation) AddResponse(code int, response *spec.Response) { } // createParameter returns swagger spec.Parameter for given paramType, description, paramName, schemaType, required. 
-func createParameter(paramType, description, paramName, objectType, schemaType string, required bool, enums []interface{}, collectionFormat string) spec.Parameter { +func createParameter(paramType, description, paramName, objectType, schemaType string, format string, required bool, enums []interface{}, collectionFormat string) spec.Parameter { // //five possible parameter types. query, path, body, header, form result := spec.Parameter{ ParamProps: spec.ParamProps{ @@ -1202,12 +1210,14 @@ func createParameter(paramType, description, paramName, objectType, schemaType s Enum: enums, }, SimpleSchema: spec.SimpleSchema{ - Type: schemaType, + Type: schemaType, + Format: format, }, } case PRIMITIVE, OBJECT: result.Type = schemaType result.Enum = enums + result.Format = format } return result } diff --git a/vendor/github.com/swaggo/swag/packages.go b/vendor/github.com/swaggo/swag/packages.go index 69a1b052..e03c0272 100644 --- a/vendor/github.com/swaggo/swag/packages.go +++ b/vendor/github.com/swaggo/swag/packages.go @@ -93,7 +93,7 @@ func (pkgDefs *PackagesDefinitions) RangeFiles(handle func(info *AstFileInfo) er for _, info := range pkgDefs.files { // ignore package path prefix with 'vendor' or $GOROOT, // because the router info of api will not be included these files. 
- if strings.HasPrefix(info.PackagePath, "vendor") || strings.HasPrefix(info.Path, runtime.GOROOT()) { + if strings.HasPrefix(info.PackagePath, "vendor") || (runtime.GOROOT() != "" && strings.HasPrefix(info.Path, runtime.GOROOT()+string(filepath.Separator))) { continue } sortedFiles = append(sortedFiles, info) @@ -146,7 +146,7 @@ func (pkgDefs *PackagesDefinitions) parseTypesFromFile(astFile *ast.File, packag parsedSchemas[typeSpecDef] = &Schema{ PkgPath: typeSpecDef.PkgPath, Name: astFile.Name.Name, - Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + Schema: TransToValidPrimitiveSchema(idt.Name), } } @@ -166,6 +166,8 @@ func (pkgDefs *PackagesDefinitions) parseTypesFromFile(astFile *ast.File, packag pkgDefs.uniqueDefinitions[fullName] = nil anotherTypeDef.NotUnique = true pkgDefs.uniqueDefinitions[anotherTypeDef.TypeName()] = anotherTypeDef + anotherTypeDef.SetSchemaName() + typeSpecDef.NotUnique = true fullName = typeSpecDef.TypeName() pkgDefs.uniqueDefinitions[fullName] = typeSpecDef @@ -174,6 +176,8 @@ func (pkgDefs *PackagesDefinitions) parseTypesFromFile(astFile *ast.File, packag pkgDefs.uniqueDefinitions[fullName] = typeSpecDef } + typeSpecDef.SetSchemaName() + if pkgDefs.packages[typeSpecDef.PkgPath] == nil { pkgDefs.packages[typeSpecDef.PkgPath] = NewPackageDefinitions(astFile.Name.Name, typeSpecDef.PkgPath).AddTypeSpec(typeSpecDef.Name(), typeSpecDef) } else if _, ok = pkgDefs.packages[typeSpecDef.PkgPath].TypeDefinitions[typeSpecDef.Name()]; !ok { @@ -192,6 +196,7 @@ func (pkgDefs *PackagesDefinitions) parseFunctionScopedTypesFromFile(astFile *as for _, astDeclaration := range astFile.Decls { funcDeclaration, ok := astDeclaration.(*ast.FuncDecl) if ok && funcDeclaration.Body != nil { + functionScopedTypes := make(map[string]*TypeSpecDef) for _, stmt := range funcDeclaration.Body.List { if declStmt, ok := (stmt).(*ast.DeclStmt); ok { if genDecl, ok := (declStmt.Decl).(*ast.GenDecl); ok && genDecl.Tok == token.TYPE { @@ -208,7 +213,28 @@ func 
(pkgDefs *PackagesDefinitions) parseFunctionScopedTypesFromFile(astFile *as parsedSchemas[typeSpecDef] = &Schema{ PkgPath: typeSpecDef.PkgPath, Name: astFile.Name.Name, - Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + Schema: TransToValidPrimitiveSchema(idt.Name), + } + } + + fullName := typeSpecDef.TypeName() + if structType, ok := typeSpecDef.TypeSpec.Type.(*ast.StructType); ok { + for _, field := range structType.Fields.List { + var idt *ast.Ident + var ok bool + switch field.Type.(type) { + case *ast.Ident: + idt, ok = field.Type.(*ast.Ident) + case *ast.StarExpr: + idt, ok = field.Type.(*ast.StarExpr).X.(*ast.Ident) + case *ast.ArrayType: + idt, ok = field.Type.(*ast.ArrayType).Elt.(*ast.Ident) + } + if ok && !IsGolangPrimitiveType(idt.Name) { + if functype, ok := functionScopedTypes[idt.Name]; ok { + idt.Name = functype.TypeName() + } + } } } @@ -216,8 +242,6 @@ func (pkgDefs *PackagesDefinitions) parseFunctionScopedTypesFromFile(astFile *as pkgDefs.uniqueDefinitions = make(map[string]*TypeSpecDef) } - fullName := typeSpecDef.TypeName() - anotherTypeDef, ok := pkgDefs.uniqueDefinitions[fullName] if ok { if anotherTypeDef == nil { @@ -228,14 +252,19 @@ func (pkgDefs *PackagesDefinitions) parseFunctionScopedTypesFromFile(astFile *as pkgDefs.uniqueDefinitions[fullName] = nil anotherTypeDef.NotUnique = true pkgDefs.uniqueDefinitions[anotherTypeDef.TypeName()] = anotherTypeDef + anotherTypeDef.SetSchemaName() + typeSpecDef.NotUnique = true fullName = typeSpecDef.TypeName() pkgDefs.uniqueDefinitions[fullName] = typeSpecDef } } else { pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + functionScopedTypes[typeSpec.Name.Name] = typeSpecDef } + typeSpecDef.SetSchemaName() + if pkgDefs.packages[typeSpecDef.PkgPath] == nil { pkgDefs.packages[typeSpecDef.PkgPath] = NewPackageDefinitions(astFile.Name.Name, typeSpecDef.PkgPath).AddTypeSpec(fullName, typeSpecDef) } else if _, ok = pkgDefs.packages[typeSpecDef.PkgPath].TypeDefinitions[fullName]; !ok { @@ -353,7 
+382,7 @@ func (pkgDefs *PackagesDefinitions) collectConstEnums(parsedSchemas map[*TypeSpe continue } - //delete it from parsed schemas, and will parse it again + // delete it from parsed schemas, and will parse it again if _, ok = parsedSchemas[typeDef]; ok { delete(parsedSchemas, typeDef) } @@ -362,21 +391,15 @@ func (pkgDefs *PackagesDefinitions) collectConstEnums(parsedSchemas map[*TypeSpe typeDef.Enums = make([]EnumValue, 0) } - name := constVar.Name.Name + name := constVar.VariableName() if _, ok = constVar.Value.(ast.Expr); ok { continue } enumValue := EnumValue{ - key: name, - Value: constVar.Value, - } - if constVar.Comment != nil && len(constVar.Comment.List) > 0 { - enumValue.Comment = constVar.Comment.List[0].Text - enumValue.Comment = strings.TrimPrefix(enumValue.Comment, "//") - enumValue.Comment = strings.TrimPrefix(enumValue.Comment, "/*") - enumValue.Comment = strings.TrimSuffix(enumValue.Comment, "*/") - enumValue.Comment = strings.TrimSpace(enumValue.Comment) + key: name, + Value: constVar.Value, + Comment: commentWithoutNameOverride(constVar.Comment), } typeDef.Enums = append(typeDef.Enums, enumValue) } @@ -469,7 +492,7 @@ func (pkgDefs *PackagesDefinitions) findPackagePathFromImports(pkg string, file } break } else if imp.Name.Name == "_" && len(pkg) > 0 { - //for unused types + // for unused types pd, ok := pkgDefs.packages[path] if ok { if pd.Name == pkg { @@ -558,17 +581,27 @@ func (pkgDefs *PackagesDefinitions) FindTypeSpec(typeName string, file *ast.File return typeDef } - //in case that comment //@name renamed the type with a name without a dot - typeDef, ok = pkgDefs.uniqueDefinitions[typeName] - if ok { - return typeDef - } - name := parts[0] typeDef, ok = pkgDefs.uniqueDefinitions[fullTypeName(file.Name.Name, name)] if !ok { pkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports("", file) typeDef = pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, name) } - return pkgDefs.parametrizeGenericType(file, typeDef, 
typeName) + + if typeDef != nil { + return pkgDefs.parametrizeGenericType(file, typeDef, typeName) + } + + // in case that comment //@name renamed the type with a name without a dot + for k, v := range pkgDefs.uniqueDefinitions { + if v == nil { + pkgDefs.debug.Printf("%s TypeSpecDef is nil", k) + continue + } + if v.SchemaName == typeName { + return v + } + } + + return nil } diff --git a/vendor/github.com/swaggo/swag/parser.go b/vendor/github.com/swaggo/swag/parser.go index 604f8278..6d550cb0 100644 --- a/vendor/github.com/swaggo/swag/parser.go +++ b/vendor/github.com/swaggo/swag/parser.go @@ -179,6 +179,12 @@ type Parser struct { // HostState is the state of the host HostState string + + // ParseFuncBody whether swag should parse api info inside of funcs + ParseFuncBody bool + + // UseStructName Dont use those ugly full-path names when using dependency flag + UseStructName bool } // FieldParserFactory create FieldParser. @@ -187,7 +193,8 @@ type FieldParserFactory func(ps *Parser, field *ast.Field) FieldParser // FieldParser parse struct field. type FieldParser interface { ShouldSkip() bool - FieldName() (string, error) + FieldNames() ([]string, error) + FirstTagValue(tag string) string FormName() string HeaderName() string PathName() string @@ -257,6 +264,13 @@ func SetParseDependency(parseDependency int) func(*Parser) { } } +// SetUseStructName sets whether to strip the full-path definition name. +func SetUseStructName(useStructName bool) func(*Parser) { + return func(p *Parser) { + p.UseStructName = useStructName + } +} + // SetMarkdownFileDirectory sets the directory to search for markdown files. 
func SetMarkdownFileDirectory(directoryPath string) func(*Parser) { return func(p *Parser) { @@ -511,7 +525,7 @@ func (parser *Parser) ParseGeneralAPIInfo(mainAPIFile string) error { func parseGeneralAPIInfo(parser *Parser, comments []string) error { previousAttribute := "" - + var tag *spec.Tag // parsing classic meta data model for line := 0; line < len(comments); line++ { commentLine := comments[line] @@ -532,8 +546,7 @@ func parseGeneralAPIInfo(parser *Parser, comments []string) error { setSwaggerInfo(parser.swagger, attr, value) case descriptionAttr: if previousAttribute == attribute { - parser.swagger.Info.Description += "\n" + value - + parser.swagger.Info.Description = AppendDescription(parser.swagger.Info.Description, value) continue } @@ -572,42 +585,43 @@ func parseGeneralAPIInfo(parser *Parser, comments []string) error { case "@schemes": parser.swagger.Schemes = strings.Split(value, " ") case "@tag.name": - parser.swagger.Tags = append(parser.swagger.Tags, spec.Tag{ - TagProps: spec.TagProps{ - Name: value, - }, - }) + if parser.matchTag(value) { + parser.swagger.Tags = append(parser.swagger.Tags, spec.Tag{ + TagProps: spec.TagProps{ + Name: value, + }, + }) + tag = &parser.swagger.Tags[len(parser.swagger.Tags)-1] + } else { + tag = nil + } case "@tag.description": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - tag.TagProps.Description = value - replaceLastTag(parser.swagger.Tags, tag) + if tag != nil { + tag.TagProps.Description = value + } case "@tag.description.markdown": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] + if tag != nil { + commentInfo, err := getMarkdownForTag(tag.TagProps.Name, parser.markdownFileDir) + if err != nil { + return err + } - commentInfo, err := getMarkdownForTag(tag.TagProps.Name, parser.markdownFileDir) - if err != nil { - return err + tag.TagProps.Description = string(commentInfo) } - - tag.TagProps.Description = string(commentInfo) - replaceLastTag(parser.swagger.Tags, tag) case "@tag.docs.url": - 
tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - tag.TagProps.ExternalDocs = &spec.ExternalDocumentation{ - URL: value, - Description: "", + if tag != nil { + tag.TagProps.ExternalDocs = &spec.ExternalDocumentation{ + URL: value, + } } - - replaceLastTag(parser.swagger.Tags, tag) case "@tag.docs.description": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - if tag.TagProps.ExternalDocs == nil { - return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) + if tag != nil { + if tag.TagProps.ExternalDocs == nil { + return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) + } + + tag.TagProps.ExternalDocs.Description = value } - - tag.TagProps.ExternalDocs.Description = value - replaceLastTag(parser.swagger.Tags, tag) - case secBasicAttr, secAPIKeyAttr, secApplicationAttr, secImplicitAttr, secPasswordAttr, secAccessCodeAttr: scheme, err := parseSecAttributes(attribute, comments, &line) if err != nil { @@ -672,6 +686,21 @@ func parseGeneralAPIInfo(parser *Parser, comments []string) error { parser.swagger.Extensions[attribute[1:]] = valueJSON } + } else if strings.HasPrefix(attribute, "@tag.x-") { + extensionName := attribute[5:] + + if len(value) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + if tag.Extensions == nil { + tag.Extensions = make(map[string]interface{}) + } + + // tag.Extensions.Add(extensionName, value) works wrong (transforms extensionName to lower case) + // needed to save case for ReDoc + // https://redocly.com/docs/api-reference-docs/specification-extensions/x-display-name/ + tag.Extensions[extensionName] = value } } @@ -773,7 +802,10 @@ loopline: // Not mandatory field if securityAttr == descriptionAttr { - description = value + if description != "" { + description += "\n" + } + description += value } // next securityDefinitions @@ -820,7 +852,7 @@ loopline: func parseSecurity(commentLine string) map[string][]string { securityMap := make(map[string][]string) - for _, 
securityOption := range strings.Split(commentLine, "||") { + for _, securityOption := range securityPairSepPattern.Split(commentLine, -1) { securityOption = strings.TrimSpace(securityOption) left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") @@ -881,6 +913,13 @@ func isGeneralAPIComment(comments []string) bool { } func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { + if tagName == "" { + // this happens when parsing the @description.markdown attribute + // it will be called properly another time with tagName="api" + // so we can safely return an empty byte slice here + return make([]byte, 0), nil + } + dirEntries, err := os.ReadDir(dirPath) if err != nil { return nil, err @@ -893,11 +932,12 @@ func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { fileName := entry.Name() - if !strings.Contains(fileName, ".md") { - continue + expectedFileName := tagName + if !strings.HasSuffix(tagName, ".md") { + expectedFileName = tagName + ".md" } - if strings.Contains(fileName, tagName) { + if fileName == expectedFileName { fullPath := filepath.Join(dirPath, fileName) commentInfo, err := os.ReadFile(fullPath) @@ -943,6 +983,27 @@ func getTagsFromComment(comment string) (tags []string) { } +func (parser *Parser) matchTag(tag string) bool { + if len(parser.tags) == 0 { + return true + } + + if _, has := parser.tags["!"+tag]; has { + return false + } + if _, has := parser.tags[tag]; has { + return true + } + + // If all tags are negation then we should return true + for key := range parser.tags { + if key[0] != '!' 
{ + return false + } + } + return true +} + func (parser *Parser) matchTags(comments []*ast.Comment) (match bool) { if len(parser.tags) == 0 { return true @@ -989,34 +1050,85 @@ func matchExtension(extensionToMatch string, comments []*ast.Comment) (match boo return true } +func getFuncDoc(decl any) (*ast.CommentGroup, bool) { + switch astDecl := decl.(type) { + case *ast.FuncDecl: // func name() {} + return astDecl.Doc, true + case *ast.GenDecl: // var name = namePointToFuncDirectlyOrIndirectly + if astDecl.Tok != token.VAR { + return nil, false + } + if len(astDecl.Specs) == 0 { + return nil, false + } + varSpec, ok := astDecl.Specs[0].(*ast.ValueSpec) + if !ok || len(varSpec.Values) != 1 { + return nil, false + } + _, ok = getFuncDoc(varSpec) + return astDecl.Doc, ok + case *ast.ValueSpec: + if len(astDecl.Values) == 0 { + return nil, false + } + value, ok := astDecl.Values[0].(*ast.Ident) + if !ok || value == nil || value.Obj == nil || value.Obj.Decl == nil { + return nil, false + } + _, ok = getFuncDoc(value.Obj.Decl) + return astDecl.Doc, ok + } + return nil, false +} + // ParseRouterAPIInfo parses router api info for given astFile. 
func (parser *Parser) ParseRouterAPIInfo(fileInfo *AstFileInfo) error { -DeclsLoop: - for _, astDescription := range fileInfo.File.Decls { - if (fileInfo.ParseFlag & ParseOperations) == ParseNone { - continue - } - astDeclaration, ok := astDescription.(*ast.FuncDecl) - if ok && astDeclaration.Doc != nil && astDeclaration.Doc.List != nil { - if parser.matchTags(astDeclaration.Doc.List) && - matchExtension(parser.parseExtension, astDeclaration.Doc.List) { - // for per 'function' comment, create a new 'Operation' object - operation := NewOperation(parser, SetCodeExampleFilesDirectory(parser.codeExampleFilesDir)) - for _, comment := range astDeclaration.Doc.List { - err := operation.ParseComment(comment.Text, fileInfo.File) - if err != nil { - return fmt.Errorf("ParseComment error in file %s :%+v", fileInfo.Path, err) - } - if operation.State != "" && operation.State != parser.HostState { - continue DeclsLoop - } - } - err := processRouterOperation(parser, operation) - if err != nil { + if (fileInfo.ParseFlag & ParseOperations) == ParseNone { + return nil + } + + // parse File.Comments instead of File.Decls.Doc if ParseFuncBody flag set to "true" + if parser.ParseFuncBody { + for _, astComments := range fileInfo.File.Comments { + if astComments.List != nil { + if err := parser.parseRouterAPIInfoComment(astComments.List, fileInfo); err != nil { return err } } } + + return nil + } + + for _, decl := range fileInfo.File.Decls { + funcDoc, ok := getFuncDoc(decl) + if ok && funcDoc != nil && funcDoc.List != nil { + if err := parser.parseRouterAPIInfoComment(funcDoc.List, fileInfo); err != nil { + return err + } + } + } + + return nil +} + +func (parser *Parser) parseRouterAPIInfoComment(comments []*ast.Comment, fileInfo *AstFileInfo) error { + if parser.matchTags(comments) && matchExtension(parser.parseExtension, comments) { + // for per 'function' comment, create a new 'Operation' object + operation := NewOperation(parser, 
SetCodeExampleFilesDirectory(parser.codeExampleFilesDir)) + for _, comment := range comments { + err := operation.ParseComment(comment.Text, fileInfo.File) + if err != nil { + return fmt.Errorf("ParseComment error in file %s for comment: '%s': %+v", fileInfo.Path, comment.Text, err) + } + if operation.State != "" && operation.State != parser.HostState { + return nil + } + } + err := processRouterOperation(parser, operation) + if err != nil { + return err + } } return nil @@ -1119,7 +1231,7 @@ func (parser *Parser) getTypeSchema(typeName string, file *ast.File, ref bool) ( return &spec.Schema{}, nil } if IsGolangPrimitiveType(typeName) { - return PrimitiveSchema(TransToValidSchemeType(typeName)), nil + return TransToValidPrimitiveSchema(typeName), nil } schemaType, err := convertFromSpecificToPrimitive(typeName) @@ -1220,13 +1332,23 @@ func (parser *Parser) ParseDefinition(typeSpecDef *TypeSpecDef) (*Schema, error) parser.debug.Printf("Skipping '%s', recursion detected.", typeName) return &Schema{ - Name: typeName, + Name: typeSpecDef.SchemaName, PkgPath: typeSpecDef.PkgPath, Schema: PrimitiveSchema(OBJECT), }, ErrRecursiveParseStruct } + if parser.UseStructName { + schemaName := strings.Split(typeSpecDef.SchemaName, ".") + if len(schemaName) > 1 { + typeSpecDef.SchemaName = schemaName[len(schemaName)-1] + typeName = typeSpecDef.SchemaName + } else { + parser.debug.Printf("Could not strip type name of %s", typeName) + } + } + parser.structStack = append(parser.structStack, typeSpecDef) parser.debug.Printf("Generating %s", typeName) @@ -1238,15 +1360,20 @@ func (parser *Parser) ParseDefinition(typeSpecDef *TypeSpecDef) (*Schema, error) } if definition.Description == "" { - fillDefinitionDescription(definition, typeSpecDef.File, typeSpecDef) + err = parser.fillDefinitionDescription(definition, typeSpecDef.File, typeSpecDef) + if err != nil { + return nil, err + } } if len(typeSpecDef.Enums) > 0 { var varnames []string var enumComments = make(map[string]string) + var 
enumDescriptions = make([]string, 0, len(typeSpecDef.Enums)) for _, value := range typeSpecDef.Enums { definition.Enum = append(definition.Enum, value.Value) varnames = append(varnames, value.key) + enumDescriptions = append(enumDescriptions, value.Comment) if len(value.Comment) > 0 { enumComments[value.key] = value.Comment } @@ -1257,11 +1384,18 @@ func (parser *Parser) ParseDefinition(typeSpecDef *TypeSpecDef) (*Schema, error) definition.Extensions[enumVarNamesExtension] = varnames if len(enumComments) > 0 { definition.Extensions[enumCommentsExtension] = enumComments + definition.Extensions[enumDescriptionsExtension] = enumDescriptions } } + schemaName := typeName + + if typeSpecDef.SchemaName != "" { + schemaName = typeSpecDef.SchemaName + } + sch := Schema{ - Name: typeName, + Name: schemaName, PkgPath: typeSpecDef.PkgPath, Schema: definition, } @@ -1282,7 +1416,7 @@ func fullTypeName(parts ...string) string { // fillDefinitionDescription additionally fills fields in definition (spec.Schema) // TODO: If .go file contains many types, it may work for a long time -func fillDefinitionDescription(definition *spec.Schema, file *ast.File, typeSpecDef *TypeSpecDef) { +func (parser *Parser) fillDefinitionDescription(definition *spec.Schema, file *ast.File, typeSpecDef *TypeSpecDef) (err error) { if file == nil { return } @@ -1297,16 +1431,23 @@ func fillDefinitionDescription(definition *spec.Schema, file *ast.File, typeSpec if !ok || typeSpec != typeSpecDef.TypeSpec { continue } - - definition.Description = - extractDeclarationDescription(typeSpec.Doc, typeSpec.Comment, generalDeclaration.Doc) + var typeName string + if typeSpec.Name != nil { + typeName = typeSpec.Name.Name + } + definition.Description, err = + parser.extractDeclarationDescription(typeName, typeSpec.Doc, typeSpec.Comment, generalDeclaration.Doc) + if err != nil { + return + } } } + return nil } // extractDeclarationDescription gets first description // from attribute descriptionAttr in commentGroups 
(ast.CommentGroup) -func extractDeclarationDescription(commentGroups ...*ast.CommentGroup) string { +func (parser *Parser) extractDeclarationDescription(typeName string, commentGroups ...*ast.CommentGroup) (string, error) { var description string for _, commentGroup := range commentGroups { @@ -1321,9 +1462,23 @@ func extractDeclarationDescription(commentGroups ...*ast.CommentGroup) string { if len(commentText) == 0 { continue } - attribute := FieldsByAnySpace(commentText, 2)[0] + fields := FieldsByAnySpace(commentText, 2) + attribute := fields[0] - if strings.ToLower(attribute) != descriptionAttr { + if attr := strings.ToLower(attribute); attr == descriptionMarkdownAttr { + if len(fields) > 1 { + typeName = fields[1] + } + if typeName == "" { + continue + } + desc, err := getMarkdownForTag(typeName, parser.markdownFileDir) + if err != nil { + return "", err + } + // if found markdown description, we will only use the markdown file content + return string(desc), nil + } else if attr != descriptionAttr { if !isHandlingDescription { continue } @@ -1336,7 +1491,7 @@ func extractDeclarationDescription(commentGroups ...*ast.CommentGroup) string { } } - return strings.TrimLeft(description, " ") + return strings.TrimLeft(description, " "), nil } // parseTypeExpr parses given type expression that corresponds to the type under @@ -1441,20 +1596,20 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st return nil, nil, nil } - fieldName, err := ps.FieldName() + fieldNames, err := ps.FieldNames() if err != nil { return nil, nil, err } - if fieldName == "" { + if len(fieldNames) == 0 { typeName, err := getFieldType(file, field.Type, nil) if err != nil { - return nil, nil, fmt.Errorf("%s: %w", fieldName, err) + return nil, nil, err } schema, err := parser.getTypeSchema(typeName, file, false) if err != nil { - return nil, nil, fmt.Errorf("%s: %w", fieldName, err) + return nil, nil, err } if len(schema.Type) > 0 && schema.Type[0] == OBJECT { @@ 
-1476,7 +1631,7 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st schema, err := ps.CustomSchema() if err != nil { - return nil, nil, fmt.Errorf("%s: %w", fieldName, err) + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) } if schema == nil { @@ -1490,40 +1645,45 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st } if err != nil { - return nil, nil, fmt.Errorf("%s: %w", fieldName, err) + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) } } err = ps.ComplementSchema(schema) if err != nil { - return nil, nil, fmt.Errorf("%s: %w", fieldName, err) + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) } var tagRequired []string required, err := ps.IsRequired() if err != nil { - return nil, nil, fmt.Errorf("%s: %w", fieldName, err) + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) } if required { - tagRequired = append(tagRequired, fieldName) + tagRequired = append(tagRequired, fieldNames...) } - if schema.Extensions == nil { - schema.Extensions = make(spec.Extensions) - } if formName := ps.FormName(); len(formName) > 0 { - schema.Extensions["formData"] = formName + schema.AddExtension("formData", formName) } if headerName := ps.HeaderName(); len(headerName) > 0 { - schema.Extensions["header"] = headerName + schema.AddExtension("header", headerName) } if pathName := ps.PathName(); len(pathName) > 0 { - schema.Extensions["path"] = pathName + schema.AddExtension("path", pathName) } - - return map[string]spec.Schema{fieldName: *schema}, tagRequired, nil + if len(schema.Type) > 0 && schema.Type[0] == ARRAY { + if collectionFormat := ps.FirstTagValue(collectionFormatTag); len(collectionFormat) > 0 { + schema.AddExtension(collectionFormatTag, collectionFormat) + } + } + fields := make(map[string]spec.Schema) + for _, name := range fieldNames { + fields[name] = *schema + } + return fields, tagRequired, nil } func getFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs 
map[string]*genericTypeSpec) (string, error) { @@ -1609,10 +1769,6 @@ func (parser *Parser) GetSchemaTypePath(schema *spec.Schema, depth int) []string return []string{ANY} } -func replaceLastTag(slice []spec.Tag, element spec.Tag) { - slice = append(slice[:len(slice)-1], element) -} - // defineTypeOfExample example value define the type (object and array unsupported). func defineTypeOfExample(schemaType, arrayType, exampleValue string) (interface{}, error) { switch schemaType { diff --git a/vendor/github.com/swaggo/swag/schema.go b/vendor/github.com/swaggo/swag/schema.go index b3a5b38c..64474eb7 100644 --- a/vendor/github.com/swaggo/swag/schema.go +++ b/vendor/github.com/swaggo/swag/schema.go @@ -4,6 +4,9 @@ import ( "errors" "fmt" "github.com/go-openapi/spec" + "go/ast" + "regexp" + "strings" ) const ( @@ -75,6 +78,44 @@ func IsNumericType(typeName string) bool { return typeName == INTEGER || typeName == NUMBER } +// TransToValidPrimitiveSchema transfer golang basic type to swagger schema with format considered. 
+func TransToValidPrimitiveSchema(typeName string) *spec.Schema { + switch typeName { + case "int", "uint": + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{INTEGER}}} + case "uint8", "int8", "uint16", "int16", "byte", "int32", "uint32", "rune": + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{INTEGER}, Format: "int32"}} + case "uint64", "int64": + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{INTEGER}, Format: "int64"}} + case "float32", "float64": + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{NUMBER}, Format: typeName}} + case "bool": + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{BOOLEAN}}} + case "string": + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{STRING}}} + } + return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{typeName}}} +} + +// TransToValidSchemeTypeWithFormat indicates type will transfer golang basic type to swagger supported type with format. +func TransToValidSchemeTypeWithFormat(typeName string) (string, string) { + switch typeName { + case "int", "uint": + return INTEGER, "" + case "uint8", "int8", "uint16", "int16", "byte", "int32", "uint32", "rune": + return INTEGER, "int32" + case "uint64", "int64": + return INTEGER, "int64" + case "float32", "float64": + return NUMBER, typeName + case "bool": + return BOOLEAN, "" + case "string": + return STRING, "" + } + return typeName, "" +} + // TransToValidSchemeType indicates type will transfer golang basic type to swagger supported type. 
func TransToValidSchemeType(typeName string) string { switch typeName { @@ -134,6 +175,44 @@ func ignoreNameOverride(name string) bool { return len(name) != 0 && name[0] == IgnoreNameOverridePrefix } +var overrideNameRegex = regexp.MustCompile(`(?i)^@name\s+(\S+)`) + +func nameOverride(commentGroup *ast.CommentGroup) string { + if commentGroup == nil { + return "" + } + + // get alias from comment '// @name ' + for _, comment := range commentGroup.List { + trimmedComment := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + texts := overrideNameRegex.FindStringSubmatch(trimmedComment) + if len(texts) > 1 { + return texts[1] + } + } + + return "" +} + +func commentWithoutNameOverride(commentGroup *ast.CommentGroup) string { + if commentGroup == nil { + return "" + } + + commentBuilder := strings.Builder{} + for _, comment := range commentGroup.List { + commentText := comment.Text + commentText = strings.TrimPrefix(commentText, "//") + commentText = strings.TrimPrefix(commentText, "/*") + commentText = strings.TrimSuffix(commentText, "*/") + commentText = strings.TrimSpace(commentText) + commentText = overrideNameRegex.ReplaceAllString(commentText, "") + commentText = strings.TrimSpace(commentText) + commentBuilder.WriteString(commentText) + } + return commentBuilder.String() +} + // IsComplexSchema whether a schema is complex and should be a ref schema func IsComplexSchema(schema *spec.Schema) bool { // a enum type should be complex diff --git a/vendor/github.com/swaggo/swag/types.go b/vendor/github.com/swaggo/swag/types.go index 0076a6b4..d7fe3222 100644 --- a/vendor/github.com/swaggo/swag/types.go +++ b/vendor/github.com/swaggo/swag/types.go @@ -3,7 +3,6 @@ package swag import ( "go/ast" "go/token" - "regexp" "strings" "github.com/go-openapi/spec" @@ -30,6 +29,8 @@ type TypeSpecDef struct { PkgPath string ParentSpec ast.Decl + SchemaName string + NotUnique bool } @@ -46,20 +47,6 @@ func (t *TypeSpecDef) Name() string { func (t *TypeSpecDef) TypeName() string 
{ if ignoreNameOverride(t.TypeSpec.Name.Name) { return t.TypeSpec.Name.Name[1:] - } else if t.TypeSpec.Comment != nil { - // get alias from comment '// @name ' - const regexCaseInsensitive = "(?i)" - reTypeName, err := regexp.Compile(regexCaseInsensitive + `^@name\s+(\S+)`) - if err != nil { - panic(err) - } - for _, comment := range t.TypeSpec.Comment.List { - trimmedComment := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) - texts := reTypeName.FindStringSubmatch(trimmedComment) - if len(texts) > 1 { - return texts[1] - } - } } var names []string @@ -86,6 +73,19 @@ func (t *TypeSpecDef) FullPath() string { return t.PkgPath + "." + t.Name() } +func (t *TypeSpecDef) Alias() string { + return nameOverride(t.TypeSpec.Comment) +} + +func (t *TypeSpecDef) SetSchemaName() { + if alias := t.Alias(); alias != "" { + t.SchemaName = alias + return + } + + t.SchemaName = t.TypeName() +} + // AstFileInfo information of an ast.File. type AstFileInfo struct { //FileSet the FileSet object which is used to parse this go source file diff --git a/vendor/github.com/swaggo/swag/utils.go b/vendor/github.com/swaggo/swag/utils.go index df31ff2e..6edf54f2 100644 --- a/vendor/github.com/swaggo/swag/utils.go +++ b/vendor/github.com/swaggo/swag/utils.go @@ -1,6 +1,9 @@ package swag -import "unicode" +import ( + "strings" + "unicode" +) // FieldsFunc split a string s by a func splitter into max n parts func FieldsFunc(s string, f func(rune2 rune) bool, n int) []string { @@ -53,3 +56,12 @@ func FieldsFunc(s string, f func(rune2 rune) bool, n int) []string { func FieldsByAnySpace(s string, n int) []string { return FieldsFunc(s, unicode.IsSpace, n) } + +// AppendDescription appends a new string to the existing description, treating +// a trailing backslash as a line continuation. 
+func AppendDescription(current, addition string) string { + if strings.HasSuffix(current, "\\") { + return current[:len(current)-1] + addition + } + return current + "\n" + addition +} diff --git a/vendor/github.com/swaggo/swag/v2/.gitignore b/vendor/github.com/swaggo/swag/v2/.gitignore new file mode 100644 index 00000000..b24be87c --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/.gitignore @@ -0,0 +1,27 @@ +dist +testdata/simple*/docs +testdata/quotes/docs +testdata/quotes/quotes.so +testdata/delims/docs +testdata/delims/delims.so +example/basic/docs/* +example/celler/docs/* +cover.out + + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +.idea +.vscode + +# Etc +.DS_Store + +/swag +/swag.exe +cmd/swag/docs/* + +.vscode/launch.json \ No newline at end of file diff --git a/vendor/github.com/swaggo/swag/v2/.goreleaser.yml b/vendor/github.com/swaggo/swag/v2/.goreleaser.yml new file mode 100644 index 00000000..a431d5db --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/.goreleaser.yml @@ -0,0 +1,30 @@ +build: + main: cmd/swag/main.go + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + - 386 + env: + - CGO_ENABLED=0 + +archives: + - id: foo + name_template: >- + {{ .ProjectName }}_ + {{- .Version }}_ + {{- if eq .Os "linux"}}Linux{{ else if eq .Os "darwin"}}Darwin{{ else }}{{ .Os }}{{ end }}_ + {{- if eq .Arch "386" }}i386{{ else if eq .Arch "amd64" }}x86_64{{ else }}{{ .Arch }}{{ end }} + +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/swaggo/swag/v2/CODE_OF_CONDUCT.md b/vendor/github.com/swaggo/swag/v2/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..717d7952 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of 
fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. 
Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [gitter.im/swaggo/swag](https://gitter.im/swaggo/swag). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/swaggo/swag/v2/CONTRIBUTING.md b/vendor/github.com/swaggo/swag/v2/CONTRIBUTING.md new file mode 100644 index 00000000..f3a3a15b --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/CONTRIBUTING.md @@ -0,0 +1,16 @@ +# Contributing + +When contributing to this repository, please first discuss the change you wish to make via issue, +email, or any other method with the owners of this repository before making a change. + +Please note we have a code of conduct, please follow it in all your interactions with the project. + +## Pull Request Process + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. 
Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +Please make an issue first if the change is likely to increase. diff --git a/vendor/github.com/swaggo/swag/v2/Dockerfile b/vendor/github.com/swaggo/swag/v2/Dockerfile new file mode 100644 index 00000000..5ea91343 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/Dockerfile @@ -0,0 +1,36 @@ +# Dockerfile References: https://docs.docker.com/engine/reference/builder/ + +# Start from the latest golang base image +FROM --platform=$BUILDPLATFORM golang:1.21-alpine as builder + +# Set the Current Working Directory inside the container +WORKDIR /app + +# Copy go mod and sum files +COPY go.mod go.sum ./ + +# Download all dependencies. Dependencies will be cached if the go.mod and go.sum files are not changed +RUN go mod download + +# Copy the source from the current directory to the Working Directory inside the container +COPY . . + +# Configure go compiler target platform +ARG TARGETOS +ARG TARGETARCH +ENV GOARCH=$TARGETARCH \ + GOOS=$TARGETOS + +# Build the Go app +RUN CGO_ENABLED=0 GOOS=linux go build -v -a -installsuffix cgo -o swag cmd/swag/main.go + + +######## Start a new stage from scratch ####### +FROM --platform=$TARGETPLATFORM scratch + +WORKDIR /code/ + +# Copy the Pre-built binary file from the previous stage +COPY --from=builder /app/swag /bin/swag + +ENTRYPOINT ["/bin/swag"] diff --git a/vendor/github.com/swaggo/swag/v2/Makefile b/vendor/github.com/swaggo/swag/v2/Makefile new file mode 100644 index 00000000..126c7589 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/Makefile @@ -0,0 +1,78 @@ +GOCMD:=$(shell which go) +GOLINT:=$(shell which golint) +GOIMPORT:=$(shell which goimports) +GOFMT:=$(shell which gofmt) +GOBUILD:=$(GOCMD) build +GOINSTALL:=$(GOCMD) install +GOCLEAN:=$(GOCMD) clean +GOTEST:=$(GOCMD) test +GOMODTIDY:=$(GOCMD) mod tidy +GOGET:=$(GOCMD) get +GOLIST:=$(GOCMD) list +GOVET:=$(GOCMD) vet 
+GOPATH:=$(shell $(GOCMD) env GOPATH) +u := $(if $(update),-u) + +BINARY_NAME:=swag +PACKAGES:=$(shell $(GOLIST) github.com/swaggo/swag/v2 github.com/swaggo/swag/v2/cmd/swag github.com/swaggo/swag/v2/gen github.com/swaggo/swag/v2/format) +GOFILES:=$(shell find . -name "*.go" -type f) + +all: test build + +.PHONY: build +build: deps + $(GOBUILD) -o $(BINARY_NAME) ./cmd/swag + +.PHONY: install +install: deps + $(GOINSTALL) ./cmd/swag + +.PHONY: test +test: + echo "mode: count" > coverage.out + for PKG in $(PACKAGES); do \ + $(GOCMD) test -v -covermode=count -coverprofile=profile.out $$PKG > tmp.out; \ + cat tmp.out; \ + if grep -q "^--- FAIL" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "build failed" tmp.out; then \ + rm tmp.out; \ + exit; \ + fi; \ + if [ -f profile.out ]; then \ + cat profile.out | grep -v "mode:" >> coverage.out; \ + rm profile.out; \ + fi; \ + done + +.PHONY: clean +clean: + $(GOCLEAN) + rm -f $(BINARY_NAME) + +.PHONY: deps +deps: + $(GOMODTIDY) + +.PHONY: vet +vet: deps + $(GOVET) $(PACKAGES) + +.PHONY: fmt +fmt: + $(GOFMT) -s -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + @diff=$$($(GOFMT) -s -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +.PHONY: view-covered +view-covered: + $(GOTEST) -coverprofile=cover.out $(TARGET) + $(GOCMD) tool cover -html=cover.out diff --git a/vendor/github.com/swaggo/swag/v2/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/swaggo/swag/v2/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..697de1f4 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ +**Describe the PR** +e.g. add cool parser. + +**Relation issue** +e.g. https://github.com/swaggo/swag/pull/118/files + +**Additional context** +Add any other context about the problem here. 
diff --git a/vendor/github.com/swaggo/swag/v2/README.md b/vendor/github.com/swaggo/swag/v2/README.md new file mode 100644 index 00000000..321bcfe2 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/README.md @@ -0,0 +1,1036 @@ +# swag + +🌍 *[English](README.md) ∙ [简体中文](README_zh-CN.md) ∙ [Português](README_pt.md)* + + + +[![Build Status](https://github.com/swaggo/swag/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions) +[![Coverage Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag) +[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag) +[![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master) +[![Go Doc](https://godoc.org/github.com/swaggo/swag?status.svg)](https://godoc.org/github.com/swaggo/swag) +[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers) +[![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield) +[![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases) + + +Swag converts Go annotations to Swagger Documentation 2.0. We've created a variety of plugins for popular [Go web frameworks](#supported-web-frameworks). This allows you to quickly integrate with an existing Go project (using Swagger UI). 
+ +## Contents +- [swag](#swag) + - [Contents](#contents) + - [Getting started](#getting-started) + - [swag cli](#swag-cli) + - [Supported Web Frameworks](#supported-web-frameworks) + - [How to use it with Gin](#how-to-use-it-with-gin) + - [The swag formatter](#the-swag-formatter) + - [Implementation Status](#implementation-status) +- [Declarative Comments Format](#declarative-comments-format) + - [General API Info](#general-api-info) + - [Using markdown descriptions](#using-markdown-descriptions) + - [Open API V3.1.0+](#open-api-v310) + - [API Operation](#api-operation) + - [Mime Types](#mime-types) + - [Param Type](#param-type) + - [Data Type](#data-type) + - [Security](#security) + - [Attribute](#attribute) + - [Available](#available) + - [Future](#future) + - [Examples](#examples) + - [Descriptions over multiple lines](#descriptions-over-multiple-lines) + - [User defined structure with an array type](#user-defined-structure-with-an-array-type) + - [Function scoped struct declaration](#function-scoped-struct-declaration) + - [Model composition in response](#model-composition-in-response) + - [Add headers in request](#add-request-headers) + - [Add headers in response](#add-a-headers-in-response) + - [Use multiple path params](#use-multiple-path-params) + - [Add multiple paths](#add-multiple-paths) + - [Example value of struct](#example-value-of-struct) + - [SchemaExample of body](#schemaexample-of-body) + - [Description of struct](#description-of-struct) + - [Use swaggertype tag to supported custom type](#use-swaggertype-tag-to-supported-custom-type) + - [Use global overrides to support a custom type](#use-global-overrides-to-support-a-custom-type) + - [Use swaggerignore tag to exclude a field](#use-swaggerignore-tag-to-exclude-a-field) + - [Add extension info to struct field](#add-extension-info-to-struct-field) + - [Rename model to display](#rename-model-to-display) + - [How to use security annotations](#how-to-use-security-annotations) + - [Add a description 
for enum items](#add-a-description-for-enum-items) + - [Generate only specific docs file types](#generate-only-specific-docs-file-types) + - [How to use Go generic types](#how-to-use-generics) + - [Change the default Go Template action delimiters](#change-the-default-go-template-action-delimiters) + - [About the Project](#about-the-project) + - [Contributors](#contributors) + - [Backers](#backers) + - [Sponsors](#sponsors) + - [License](#license) + +## Getting started + +1. Add comments to your API source code, See [Declarative Comments Format](#declarative-comments-format). + +2. Install swag by using: +```sh +go install github.com/swaggo/swag/v2/cmd/swag@latest +``` +To build from source you need [Go](https://golang.org/dl/) (1.19 or newer). + +Alternatively you can run the docker image: +```sh +docker run --rm -v $(pwd):/code ghcr.io/swaggo/swag:latest +``` + +Or download a pre-compiled binary from the [release page](https://github.com/swaggo/swag/releases). + +3. Run `swag init` in the project's root folder which contains the `main.go` file. This will parse your comments and generate the required files (`docs` folder and `docs/docs.go`). +```sh +swag init +``` + + Make sure to import the generated `docs/docs.go` so that your specific configuration gets `init`'ed. If your General API annotations do not live in `main.go`, you can let swag know with `-g` flag. + ```go + import _ "example-module-name/docs" + ``` + ```sh + swag init -g http/api.go + ``` + +4. (optional) Use `swag fmt` format the SWAG comment. (Please upgrade to the latest version) + + ```sh + swag fmt + ``` + +## swag cli + +```sh +swag init -h +Swag version: v2.0.0 +NAME: + swag init - Create docs.go + +USAGE: + swag init [command options] [arguments...] + +OPTIONS: + --quiet, -q Make the logger quiet. 
(default: false) + --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go") + --dir value, -d value Directories you want to parse,comma separated and general-info file must be in the first one (default: "./") + --exclude value Exclude directories and files when searching, comma separated + --propertyStrategy value, -p value Property Naming Strategy like snakecase,camelcase,pascalcase (default: "camelcase") + --output value, -o value Output directory for all the generated files(swagger.json, swagger.yaml and docs.go) (default: "./docs") + --outputTypes value, --ot value Output types of generated files (docs.go, swagger.json, swagger.yaml) like go,json,yaml (default: "go,json,yaml") + --parseVendor Parse go files in 'vendor' folder, disabled by default (default: false) + --parseDependency, --pd Parse go files inside dependency folder, disabled by default (default: false) + --parseDependencyLevel, --pdl Enhancement of '--parseDependency', parse go files inside dependency folder, 0 disabled, 1 only parse models, 2 only parse operations, 3 parse all (default: 0) + --markdownFiles value, --md value Parse folder containing markdown files to use as description, disabled by default + --codeExampleFiles value, --cef value Parse folder containing code example files to use for the x-codeSamples extension, disabled by default + --parseInternal Parse go files in internal packages, disabled by default (default: false) + --generatedTime Generate timestamp at the top of docs.go, disabled by default (default: false) + --parseDepth value Dependency parse depth (default: 100) + --requiredByDefault Set validation required for all fields by default (default: false) + --instanceName value This parameter can be used to name different swagger document instances. It is optional. + --overridesFile value File to read global type overrides from. 
(default: ".swaggo") + --parseGoList Parse dependency via 'go list' (default: true) + --parseExtension value Parse only those operations that match given extension + --tags value, -t value A comma-separated list of tags to filter the APIs for which the documentation is generated.Special case if the tag is prefixed with the '!' character then the APIs with that tag will be excluded + --v3.1 Generate OpenAPI V3.1 spec (default: false) + --templateDelims value, --td value Provide custom delimeters for Go template generation. The format is leftDelim,rightDelim. For example: "[[,]]" + --collectionFormat value, --cf value Set default collection format (default: "csv") + --state value Initial state for the state machine (default: ""), @HostState in root file, @State in other files + --parseFuncBody Parse API info within body of functions in go files, disabled by default (default: false) + --packageName --output A package name of docs.go, using output directory name by default (check --output option) + --collectionFormat value, --cf value Set default collection format (default: "csv") + --help, -h show help +``` + +```bash +swag fmt -h +NAME: + swag fmt - format swag comments + +USAGE: + swag fmt [command options] [arguments...] 
+ +OPTIONS: + --dir value, -d value Directories you want to parse,comma separated and general-info file must be in the first one (default: "./") + --exclude value Exclude directories and files when searching, comma separated + --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go") + --help, -h show help (default: false) + +``` + +## Supported Web Frameworks + +- [gin](http://github.com/swaggo/gin-swagger) +- [echo](http://github.com/swaggo/echo-swagger) +- [buffalo](https://github.com/swaggo/buffalo-swagger) +- [net/http](https://github.com/swaggo/http-swagger) +- [gorilla/mux](https://github.com/swaggo/http-swagger) +- [go-chi/chi](https://github.com/swaggo/http-swagger) +- [flamingo](https://github.com/i-love-flamingo/swagger) +- [fiber](https://github.com/gofiber/swagger) +- [atreugo](https://github.com/Nerzal/atreugo-swagger) +- [hertz](https://github.com/hertz-contrib/swagger) + +## How to use it with Gin + +Find the example source code [here](https://github.com/swaggo/swag/tree/master/example/celler). + +Finish the steps in [Getting started](#getting-started) +1. After using `swag init` to generate Swagger 2.0 docs, import the following packages: +```go +import "github.com/swaggo/gin-swagger" // gin-swagger middleware +import "github.com/swaggo/files" // swagger embed files +``` + +2. Add [General API](#general-api-info) annotations in `main.go` code: + +```go +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server celler server. 
+// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host localhost:8080 +// @BasePath /api/v1 + +// @securityDefinitions.basic BasicAuth + +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ +func main() { + r := gin.Default() + + c := controller.NewController() + + v1 := r.Group("/api/v1") + { + accounts := v1.Group("/accounts") + { + accounts.GET(":id", c.ShowAccount) + accounts.GET("", c.ListAccounts) + accounts.POST("", c.AddAccount) + accounts.DELETE(":id", c.DeleteAccount) + accounts.PATCH(":id", c.UpdateAccount) + accounts.POST(":id/images", c.UploadAccountImage) + } + //... + } + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + r.Run(":8080") +} +//... +``` + +Additionally some general API info can be set dynamically. The generated code package `docs` exports `SwaggerInfo` variable which we can use to set the title, description, version, host and base path programmatically. Example using Gin: + +```go +package main + +import ( + "github.com/gin-gonic/gin" + "github.com/swaggo/files" + "github.com/swaggo/gin-swagger" + + "./docs" // docs is generated by Swag CLI, you have to import it. +) + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +func main() { + + // programmatically set swagger info + docs.SwaggerInfo.Title = "Swagger Example API" + docs.SwaggerInfo.Description = "This is a sample server Petstore server." 
+ docs.SwaggerInfo.Version = "1.0" + docs.SwaggerInfo.Host = "petstore.swagger.io" + docs.SwaggerInfo.BasePath = "/v2" + docs.SwaggerInfo.Schemes = []string{"http", "https"} + + r := gin.New() + + // use ginSwagger middleware to serve the API docs + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + + r.Run() +} +``` + +3. Add [API Operation](#api-operation) annotations in `controller` code + +``` go +package controller + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/swaggo/swag/example/celler/httputil" + "github.com/swaggo/swag/example/celler/model" +) + +// ShowAccount godoc +// @Summary Show an account +// @Description get string by ID +// @Tags accounts +// @Accept json +// @Produce json +// @Param id path int true "Account ID" +// @Success 200 {object} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts/{id} [get] +func (c *Controller) ShowAccount(ctx *gin.Context) { + id := ctx.Param("id") + aid, err := strconv.Atoi(id) + if err != nil { + httputil.NewError(ctx, http.StatusBadRequest, err) + return + } + account, err := model.AccountOne(aid) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, account) +} + +// ListAccounts godoc +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { + q := ctx.Request.URL.Query().Get("q") + accounts, err := model.AccountsAll(q) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + 
ctx.JSON(http.StatusOK, accounts) +} +//... +``` + +```console +swag init +``` + +4. Run your app, and browse to http://localhost:8080/swagger/index.html. You will see Swagger 2.0 Api documents as shown below: + +![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png) + +## The swag formatter + +The Swag Comments can be automatically formatted, just like 'go fmt'. +Find the result of formatting [here](https://github.com/swaggo/swag/tree/master/example/celler). + +Usage: +```shell +swag fmt +``` + +Exclude folder: +```shell +swag fmt -d ./ --exclude ./internal +``` + +When using `swag fmt`, you need to ensure that you have a doc comment for the function to ensure correct formatting. +This is due to `swag fmt` indenting swag comments with tabs, which is only allowed *after* a standard doc comment. + +For example, use + +```go +// ListAccounts lists all existing accounts +// +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { +``` + +## Implementation Status + +[Swagger 2.0 document](https://swagger.io/docs/specification/2-0/basic-structure/) + +- [x] Basic Structure +- [x] API Host and Base Path +- [x] Paths and Operations +- [x] Describing Parameters +- [x] Describing Request Body +- [x] Describing Responses +- [x] MIME Types +- [x] Authentication + - [x] Basic Authentication + - [x] API Keys +- [x] Adding Examples +- [x] File Upload +- [x] Enums +- [x] Grouping Operations With Tags +- [ ] Swagger Extensions + +# Declarative Comments Format + +## General API Info + +**Example** 
+[celler/main.go](https://github.com/swaggo/swag/blob/master/example/celler/main.go) + +| annotation | description | example | +|-------------|--------------------------------------------|---------------------------------| +| title | **Required.** The title of the application.| // @title Swagger Example API | +| version | **Required.** Provides the version of the application API.| // @version 1.0 | +| description | A short description of the application. |// @description This is a sample server celler server. | +| tag.name | Name of a tag.| // @tag.name This is the name of the tag | +| tag.description | Description of the tag | // @tag.description Cool Description | +| tag.docs.url | Url of the external Documentation of the tag | // @tag.docs.url https://example.com| +| tag.docs.description | Description of the external Documentation of the tag| // @tag.docs.description Best example documentation | +| termsOfService | The Terms of Service for the API.| // @termsOfService http://swagger.io/terms/ | +| contact.name | The contact information for the exposed API.| // @contact.name API Support | +| contact.url | The URL pointing to the contact information. MUST be in the format of a URL. | // @contact.url http://www.swagger.io/support| +| contact.email| The email address of the contact person/organization. MUST be in the format of an email address.| // @contact.email support@swagger.io | +| license.name | **Required.** The license name used for the API.|// @license.name Apache 2.0| +| license.url | A URL to the license used for the API. MUST be in the format of a URL. | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html | +| host | The host (name or ip) serving the API. | // @host localhost:8080 | +| BasePath | The base path on which the API is served. | // @BasePath /api/v1 | +| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. 
Value MUST be as described under [Mime Types](#mime-types). | // @accept json | +| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | // @produce json | +| query.collection.format | The default collection(array) param format in query,enums:csv,multi,pipes,tsv,ssv. If not set, csv is the default.| // @query.collection.format multi +| schemes | The transfer protocol for the operation that separated by spaces. | // @schemes http https | +| externalDocs.description | Description of the external document. | // @externalDocs.description OpenAPI | +| externalDocs.url | URL of the external document. | // @externalDocs.url https://swagger.io/resources/open-api/ | +| x-name | The extension key, must be start by x- and take only json value | // @x-example-key {"key": "value"} | + +### Using markdown descriptions +When a short string in your documentation is insufficient, or you need images, code examples and things like that you may want to use markdown descriptions. In order to use markdown descriptions use the following annotations. + + +| annotation | description | example | +|-------------|--------------------------------------------|---------------------------------| +| title | **Required.** The title of the application.| // @title Swagger Example API | +| version | **Required.** Provides the version of the application API.| // @version 1.0 | +| description.markdown | A short description of the application. Parsed from the api.md file. This is an alternative to @description |// @description.markdown No value needed, this parses the description from api.md | +| tag.name | Name of a tag.| // @tag.name This is the name of the tag | +| tag.description.markdown | Description of the tag this is an alternative to tag.description. 
The description will be read from a file named like tagname.md | // @tag.description.markdown |
+
+## Open API V3.1.0+
+
+The following annotations are only available if you set the -v3.1 flag in the CLI.
+
+| annotation | description | example |
+|-------------|--------------------------------------------|---------------------------------|
+| servers.url | The URL of a server| // @servers.url https://petstore.example.com/api/v1 |
+| servers.description | The description of a server| // @servers.description Production API |
+
+## API Operation
+
+**Example**
+[celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller)
+
+
+| annotation | description |
+|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| description | A verbose explanation of the operation behavior. |
+| description.markdown | A short description of the operation. The description will be read from a file. E.g. `@description.markdown details` will load `details.md` |
+| id | A unique string used to identify the operation. Must be unique among all API operations. |
+| tags | A list of tags to each API operation, separated by commas. |
+| summary | A short summary of what the operation does. |
+| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). |
+| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). |
+| param | Parameters separated by spaces. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` |
+| security | [Security](#security) to each API operation. 
|
+| success | Success response, separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` |
+| failure | Failure response, separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` |
+| response | Same as `success` and `failure` |
+| header | Header in response, separated by spaces. `return code`,`{param type}`,`data type`,`comment` |
+| router | Path definition, separated by spaces. `path`,`[httpMethod]` |
+| deprecatedrouter | Same as router, but deprecated. |
+| x-name | The extension key; must start with x- and take only a json value. |
+| x-codeSample | Optional Markdown usage. Takes `file` as parameter. This will then search for a file named like the summary in the given folder. |
+| deprecated | Mark endpoint as deprecated. |
+| servers.url | (Only for -v3.1 on the CLI) The URL of a server that will override the base one for this operation |
+| servers.description | (Only for -v3.1 on the CLI) The description of a server that will override the base one for this operation |
+
+
+## Mime Types
+
+`swag` accepts all MIME Types which are in the correct format, that is, match `*/*`. 
+Besides that, `swag` also accepts aliases for some MIME Types as follows: + +| Alias | MIME Type | +|-----------------------|-----------------------------------| +| json | application/json | +| xml | text/xml | +| plain | text/plain | +| html | text/html | +| mpfd | multipart/form-data | +| x-www-form-urlencoded | application/x-www-form-urlencoded | +| json-api | application/vnd.api+json | +| json-stream | application/x-json-stream | +| octet-stream | application/octet-stream | +| png | image/png | +| jpeg | image/jpeg | +| gif | image/gif | + + + +## Param Type + +- query +- path +- header +- body +- formData + +## Data Type + +- string (string) +- integer (int, uint, uint32, uint64) +- number (float32) +- boolean (bool) +- file (param data type when uploading) +- user defined struct + +## Security +| annotation | description | parameters | example | +|------------|-------------|------------|---------| +| securitydefinitions.basic | [Basic](https://swagger.io/docs/specification/2-0/authentication/basic-authentication/) auth. | | // @securityDefinitions.basic BasicAuth | +| securitydefinitions.apikey | [API key](https://swagger.io/docs/specification/2-0/authentication/api-keys/) auth. | in, name, description | // @securityDefinitions.apikey ApiKeyAuth | +| securitydefinitions.oauth2.application | [OAuth2 application](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope, description | // @securitydefinitions.oauth2.application OAuth2Application | +| securitydefinitions.oauth2.implicit | [OAuth2 implicit](https://swagger.io/docs/specification/authentication/oauth2/) auth. | authorizationUrl, scope, description | // @securitydefinitions.oauth2.implicit OAuth2Implicit | +| securitydefinitions.oauth2.password | [OAuth2 password](https://swagger.io/docs/specification/authentication/oauth2/) auth. 
| tokenUrl, scope, description | // @securitydefinitions.oauth2.password OAuth2Password | +| securitydefinitions.oauth2.accessCode | [OAuth2 access code](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, authorizationUrl, scope, description | // @securitydefinitions.oauth2.accessCode OAuth2AccessCode | +| securitydefinitions.bearerauth | [Bearer Authentication](https://swagger.io/docs/specification/authentication/bearer-authentication/) auth. supported in Swagger v3.x| | // @securitydefinitions.bearerauth BearerAuth | + + +| parameters annotation | example | +|---------------------------------|-------------------------------------------------------------------------| +| in | // @in header | +| name | // @name Authorization | +| tokenUrl | // @tokenUrl https://example.com/oauth/token | +| authorizationurl | // @authorizationurl https://example.com/oauth/authorize | +| scope.hoge | // @scope.write Grants write access | +| description | // @description OAuth protects our entity endpoints | + +## Attribute + +```go +// @Param enumstring query string false "string enums" Enums(A, B, C) +// @Param enumint query int false "int enums" Enums(1, 2, 3) +// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) +// @Param string query string false "string valid" minlength(5) maxlength(10) +// @Param int query int false "int valid" minimum(1) maximum(10) +// @Param default query string false "string default" default(A) +// @Param example query string false "string example" example(string) +// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable) +``` + +It also works for the struct fields: + +```go +type Foo struct { + Bar string `minLength:"4" maxLength:"16" example:"random string"` + Baz int `minimum:"10" maximum:"20" default:"15"` + Qux []string `enums:"foo,bar,baz"` +} +``` + +### Available + +Field 
Name | Type | Description +---|:---:|--- +validate | `string` | Determines the validation for the parameter. Possible values are: `required,optional`. +default | * | Declares the value of the parameter that the server will use if none is provided, for example a "count" to control the number of results per page might default to 100 if not supplied by the client in the request. (Note: "default" has no meaning for required parameters.) See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Unlike JSON Schema this value MUST conform to the defined [`type`](#parameterType) for this parameter. +maximum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2. +minimum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3. +multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1. +maxLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1. +minLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2. +enums | [\*] | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1. +format | `string` | The extending format for the previously mentioned [`type`](#parameterType). See [Data Type Formats](https://swagger.io/specification/v2/#dataTypeFormat) for further details. +collectionFormat | `string` |Determines the format of the array if type array is used. Possible values are:
  • `csv` - comma separated values `foo,bar`.
  • `ssv` - space separated values `foo bar`.
  • `tsv` - tab separated values `foo\tbar`.
  • `pipes` - pipe separated values foo|bar.
  • `multi` - corresponds to multiple parameter instances instead of multiple values for a single instance `foo=bar&foo=baz`. This is valid only for parameters [`in`](#parameterIn) "query" or "formData".
Default value is `csv`. +example | * | Declares the example for the parameter value +extensions | `string` | Add extension to parameters. + +### Future + +Field Name | Type | Description +---|:---:|--- +pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. +maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2. +minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3. +uniqueItems | `boolean` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.4. + +## Examples + +### Descriptions over multiple lines + +You can add descriptions spanning multiple lines in either the general api description or routes definitions like so: + +```go +// @description This is the first line +// @description This is the second line +// @description And so forth. +``` + +### User defined structure with an array type + +```go +// @Success 200 {array} model.Account <-- This is a user defined struct. +``` + +```go +package model + +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` +} +``` + + +### Function scoped struct declaration + +You can declare your request response structs inside a function body. +You must have to follow the naming convention `.. `. 
+ +```go +package main + +// @Param request body main.MyHandler.request true "query params" +// @Success 200 {object} main.MyHandler.response +// @Router /test [post] +func MyHandler() { + type request struct { + RequestField string + } + + type response struct { + ResponseField string + } +} +``` + + +### Model composition in response +```go +// JSONResult's data field will be overridden by the specific type proto.Order +@success 200 {object} jsonresult.JSONResult{data=proto.Order} "desc" +``` + +```go +type JSONResult struct { + Code int `json:"code" ` + Message string `json:"message"` + Data interface{} `json:"data"` +} + +type Order struct { //in `proto` package + Id uint `json:"id"` + Data interface{} `json:"data"` +} +``` + +- also support array of objects and primitive types as nested response +```go +@success 200 {object} jsonresult.JSONResult{data=[]proto.Order} "desc" +@success 200 {object} jsonresult.JSONResult{data=string} "desc" +@success 200 {object} jsonresult.JSONResult{data=[]string} "desc" +``` + +- overriding multiple fields. field will be added if not exists +```go +@success 200 {object} jsonresult.JSONResult{data1=string,data2=[]string,data3=proto.Order,data4=[]proto.Order} "desc" +``` +- overriding deep-level fields +```go +type DeepObject struct { //in `proto` package + ... 
+} +@success 200 {object} jsonresult.JSONResult{data1=proto.Order{data=proto.DeepObject},data2=[]proto.Order{data=[]proto.DeepObject}} "desc" +``` +### Add response request + +```go +// @Param X-MyHeader header string true "MyHeader must be set for valid response" +// @Param X-API-VERSION header string true "API version eg.: 1.0" +``` + +### Add response headers + +```go +// @Success 200 {string} string "ok" +// @failure 400 {string} string "error" +// @response default {string} string "other error" +// @Header 200 {string} Location "/entity/1" +// @Header 200,400,default {string} Token "token" +// @Header all {string} Token2 "token2" +``` + +### Use multiple path params + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param account_id path int true "Account ID" +// ... +// @Router /examples/groups/{group_id}/accounts/{account_id} [get] +``` + +### Add multiple paths + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param user_id path int true "User ID" +// ... +// @Router /examples/groups/{group_id}/user/{user_id}/address [put] +// @Router /examples/user/{user_id}/address [put] +``` + +### Example value of struct + +```go +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` + PhotoUrls []string `json:"photo_urls" example:"http://test/image/1.jpg,http://test/image/2.jpg"` +} +``` + +### SchemaExample of body + +```go +// @Param email body string true "message/rfc822" SchemaExample(Subject: Testmail\r\n\r\nBody Message\r\n) +``` + +### Description of struct + +```go +// Account model info +// @Description User account information +// @Description with user id and username +type Account struct { + // ID this is userid + ID int `json:"id"` + Name string `json:"name"` // This is Name +} +``` + +[#708](https://github.com/swaggo/swag/issues/708) The parser handles only struct comments starting with `@Description` attribute. +But it writes all struct field comments as is. 
+ +So, generated swagger doc as follows: +```json +"Account": { + "type":"object", + "description": "User account information with user id and username" + "properties": { + "id": { + "type": "integer", + "description": "ID this is userid" + }, + "name": { + "type":"string", + "description": "This is Name" + } + } +} +``` + +### Use swaggertype tag to supported custom type +[#201](https://github.com/swaggo/swag/issues/201#issuecomment-475479409) + +```go +type TimestampTime struct { + time.Time +} + +///implement encoding.JSON.Marshaler interface +func (t *TimestampTime) MarshalJSON() ([]byte, error) { + bin := make([]byte, 16) + bin = strconv.AppendInt(bin[:0], t.Time.Unix(), 10) + return bin, nil +} + +func (t *TimestampTime) UnmarshalJSON(bin []byte) error { + v, err := strconv.ParseInt(string(bin), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(v, 0) + return nil +} +/// + +type Account struct { + // Override primitive type by simply specifying it via `swaggertype` tag + ID sql.NullInt64 `json:"id" swaggertype:"integer"` + + // Override struct type to a primitive type 'integer' by specifying it via `swaggertype` tag + RegisterTime TimestampTime `json:"register_time" swaggertype:"primitive,integer"` + + // Array types can be overridden using "array," format + Coeffs []big.Float `json:"coeffs" swaggertype:"array,number"` +} +``` + +[#379](https://github.com/swaggo/swag/issues/379) +```go +type CerticateKeyPair struct { + Crt []byte `json:"crt" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` + Key []byte `json:"key" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` +} +``` +generated swagger doc as follows: +```go +"api.MyBinding": { + "type":"object", + "properties":{ + "crt":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + }, + "key":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + } + } +} + +``` + +### Use global overrides to support a custom 
type + +If you are using generated files, the [`swaggertype`](#use-swaggertype-tag-to-supported-custom-type) or `swaggerignore` tags may not be possible. + +By passing a mapping to swag with `--overridesFile` you can tell swag to use one type in place of another wherever it appears. By default, if a `.swaggo` file is present in the current directory it will be used. + +Go code: +```go +type MyStruct struct { + ID sql.NullInt64 `json:"id"` + Name sql.NullString `json:"name"` +} +``` + +`.swaggo`: +``` +// Replace all NullInt64 with int +replace database/sql.NullInt64 int + +// Don't include any fields of type database/sql.NullString in the swagger docs +skip database/sql.NullString +``` + +Possible directives are comments (beginning with `//`), `replace path/to/a.type path/to/b.type`, and `skip path/to/a.type`. + +(Note that the full paths to any named types must be provided to prevent problems when multiple packages define a type with the same name) + +Rendered: +```go +"types.MyStruct": { + "id": "integer" +} +``` + + +### Use swaggerignore tag to exclude a field + +```go +type Account struct { + ID string `json:"id"` + Name string `json:"name"` + Ignored int `swaggerignore:"true"` +} +``` + +### Add extension info to struct field + +```go +type Account struct { + ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // extensions fields must start with "x-" +} +``` + +generate swagger doc as follows: + +```go +"Account": { + "type": "object", + "properties": { + "id": { + "type": "string", + "x-nullable": true, + "x-abc": "def", + "x-omitempty": false + } + } +} +``` +### Rename model to display + +```golang +type Resp struct { + Code int +}//@name Response +``` + +### How to use security annotations + +General API info. 
+ +```go +// @securityDefinitions.basic BasicAuth + +// @securitydefinitions.oauth2.application OAuth2Application +// @tokenUrl https://example.com/oauth/token +// @scope.write Grants write access +// @scope.admin Grants read and write access to administrative information +``` + +Each API operation. + +```go +// @Security ApiKeyAuth +``` + +Make it AND condition + +```go +// @Security ApiKeyAuth +// @Security OAuth2Application[write, admin] +``` + +Make it OR condition + +```go +// @Security ApiKeyAuth || firebase +// @Security OAuth2Application[write, admin] || APIKeyAuth +``` + + +### Add a description for enum items + +```go +type Example struct { + // Sort order: + // * asc - Ascending, from A to Z. + // * desc - Descending, from Z to A. + Order string `enums:"asc,desc"` +} +``` + +### Generate only specific docs file types + +By default `swag` command generates Swagger specification in three different files/file types: +- docs.go +- swagger.json +- swagger.yaml + +If you would like to limit a set of file types which should be generated you can use `--outputTypes` (short `-ot`) flag. Default value is `go,json,yaml` - output types separated with comma. To limit output only to `go` and `yaml` files, you would write `go,yaml`. With complete command that would be `swag init --outputTypes go,yaml`. + +### How to use Generics + +```go +// @Success 200 {object} web.GenericNestedResponse[types.Post] +// @Success 204 {object} web.GenericNestedResponse[types.Post, Types.AnotherOne] +// @Success 201 {object} web.GenericNestedResponse[web.GenericInnerType[types.Post]] +func GetPosts(w http.ResponseWriter, r *http.Request) { + _ = web.GenericNestedResponse[types.Post]{} +} +``` +See [this file](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) for more details +and other examples. 
+ +### Change the default Go Template action delimiters +[#980](https://github.com/swaggo/swag/issues/980) +[#1177](https://github.com/swaggo/swag/issues/1177) + +If your swagger annotations or struct fields contain "{{" or "}}", the template generation will most likely fail, as these are the default delimiters for [go templates](https://pkg.go.dev/text/template#Template.Delims). + +To make the generation work properly, you can change the default delimiters with `-td`. For example: +```console +swag init -g http/api.go -td "[[,]]" +``` +The new delimiter is a string with the format "`<left delimiter>`,`<right delimiter>`". + +### Parse Internal and Dependency Packages + +If the struct is defined in a dependency package, use `--parseDependency`. + +If the struct is defined in your main project, use `--parseInternal`. + +If you want to include both internal and from dependencies use both flags +``` +swag init --parseDependency --parseInternal +``` + +## About the Project +This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) but we simplified the usage and added support for a variety of [web frameworks](#supported-web-frameworks). Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers). It is licensed under [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en). +## Contributors + +This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. + + + +## Backers + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/swag#backer)] + + + + +## Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
[[Become a sponsor](https://opencollective.com/swag#sponsor)] + + + + + + + + + + + + + + + +## License +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_large) diff --git a/vendor/github.com/swaggo/swag/v2/README_pt.md b/vendor/github.com/swaggo/swag/v2/README_pt.md new file mode 100644 index 00000000..060b2c97 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/README_pt.md @@ -0,0 +1,968 @@ +# swag + +🌍 *[English](README.md) ∙ [简体中文](README_zh-CN.md) ∙ [Português](README_pt.md)* + + + +[![Build Status](https://github.com/swaggo/swag/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions) +[![Coverage Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag) +[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag) +[![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master) +[![Go Doc](https://godoc.org/github.com/swaggo/swag?status.svg)](https://godoc.org/github.com/swaggo/swag) +[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers) +[![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield) +[![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases) +
+Swag converte anotações Go para Documentação Swagger 2.0. Criámos uma variedade de plugins para populares [Go web frameworks](#supported-web-frameworks).
Isto permite uma integração rápida com um projecto Go existente (utilizando a Swagger UI). + +## Conteúdo +- [Começando](#começando) + - [Estruturas Web Suportadas](#estruturas-web-suportadas) + - [Como utilizá-lo com Gin](#como-como-ser-como-gin) + - [O formatador de swag](#a-formatação-de-swag) + - [Estado de Implementação](#implementação-estado) + - [Formato dos comentários declarativos](#formato-dos-comentarios-declarativos) + - [Informações Gerais API](#informações-gerais-api) + - [Operação API](#api-operacao) + - [Segurança](#seguranca) + - [Exemplos](#exemplos) + - [Descrições em múltiplas linhas](#descricoes-sobre-múltiplas-linhas) + - [Estrutura definida pelo utilizador com um tipo de matriz](#-estrutura-definida-pelo-utilizador-com-um-um-tipo) + - [Declaração de estruturação de funções](#function-scoped-struct-declaration) + - [Composição do modelo em resposta](#model-composição-em-resposta) + - [Adicionar um cabeçalho em resposta](#add-a-headers-in-response) + - [Utilizar parâmetros de caminhos múltiplos](#use-multiple-path-params) + - [Exemplo de valor de estrutura](#exemplo-do-valor-de-estrutura) + - [Schema Exemplo do corpo](#schemaexample-of-body) + - [Descrição da estrutura](#descrição-da-estrutura) + - [Usar etiqueta do tipo swaggertype para suportar o tipo personalizado](#use-swaggertype-tag-to-supported-custom-type) + - [Utilizar anulações globais para suportar um tipo personalizado](#use-global-overrides-to-support-a-custom-type) + - [Use swaggerignore tag para excluir um campo](#use-swaggerignore-tag-to-excluir-um-campo) + - [Adicionar informações de extensão ao campo de estruturação](#add-extension-info-to-struct-field) + - [Renomear modelo a expor](#renome-modelo-a-exibir) + - [Como utilizar as anotações de segurança](#como-utilizar-as-anotações-de-segurança) + - [Adicionar uma descrição para enumerar artigos](#add-a-description-for-enum-items) + - [Gerar apenas tipos de ficheiros de documentos 
específicos](#generate-only-specific-docs-file-file-types) + - [Como usar tipos genéricos](#como-usar-tipos-genéricos) +- [Sobre o projecto](#sobre-o-projecto) + +## Começando + +1. Adicione comentários ao código-fonte da API, consulte [Formato dos comentários declarativos](#declarative-comments-format). + +2. Descarregue o swag utilizando: +```sh +go install github.com/swaggo/swag/cmd/swag@latest +``` +Para construir a partir da fonte é necessário [Go](https://golang.org/dl/) (1.19 ou mais recente). + +Ou descarregar um binário pré-compilado a partir da [página de lançamento](https://github.com/swaggo/swag/releases). + +3. Executar `swag init` na pasta raiz do projecto que contém o ficheiro `main.go`. Isto irá analisar os seus comentários e gerar os ficheiros necessários (pasta `docs` e `docs/docs.go`). +```sh +swag init +``` + +Certifique-se de importar os `docs/docs.go` gerados para que a sua configuração específica fique "init" ed. Se as suas anotações API gerais não viverem em `main.go`, pode avisar a swag com a bandeira `-g`. +```sh +swag init -g http/api.go +``` + +4. (opcional) Utilizar o formato `swag fmt` no comentário SWAG. (Por favor, actualizar para a versão mais recente) + +```sh +swag fmt +``` + +## swag cli + +```sh +swag init -h +NOME: + swag init - Criar docs.go + +UTILIZAÇÃO: + swag init [opções de comando] [argumentos...] 
+ +OPÇÕES: + --quiet, -q Fazer o logger ficar quiet (por padrão: falso) + --generalInfo valor, -g valor Go caminho do ficheiro em que 'swagger general API Info' está escrito (por padrão: "main.go") + --dir valor, -d valor Os directórios que deseja analisar, separados por vírgulas e de informação geral devem estar no primeiro (por padrão: "./") + --exclude valor Excluir directórios e ficheiros ao pesquisar, separados por vírgulas + -propertyStrategy da estratégia, -p valor da propriedadeEstratégia de nomeação de propriedades como snakecase,camelcase,pascalcase (por padrão: "camelcase") + --output de saída, -o valor directório de saída para todos os ficheiros gerados(swagger.json, swagger.yaml e docs.go) (por padrão: "./docs") + --outputTypes valor de saídaTypes, -- valor de saída Tipos de ficheiros gerados (docs.go, swagger.json, swagger.yaml) como go,json,yaml (por padrão: "go,json,yaml") + --parseVendor ParseVendor Parse go files na pasta 'vendor', desactivado por padrão (padrão: falso) + --parseInternal Parse go ficheiros em pacotes internos, desactivados por padrão (padrão: falso) + --generatedTime Gerar timestamp no topo dos docs.go, desactivado por padrão (padrão: falso) + --parteDepth value Dependência profundidade parse (por padrão: 100) + --templateDelims value, --td value fornecem delimitadores personalizados para a geração de modelos Go. O formato é leftDelim,rightDelim. Por exemplo: "[[,]]" + ... + + --help, -h mostrar ajuda (por padrão: falso) +``` + +```bash +swag fmt -h +NOME: + swag fmt - formato swag comentários + +UTILIZAÇÃO: + swag fmt [opções de comando] [argumentos...] 
+ +OPÇÕES: + --dir valor, -d valor Os directórios que pretende analisar, separados por vírgulas e de informação geral devem estar no primeiro (por padrão: "./") + --excluir valor Excluir directórios e ficheiros ao pesquisar, separados por vírgulas + --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (por padrão: "main.go") + --ajuda, -h mostrar ajuda (por padrão: falso) + +``` + +## Estruturas Web Suportadas + +- [gin](http://github.com/swaggo/gin-swagger) +- [echo](http://github.com/swaggo/echo-swagger) +- [buffalo](https://github.com/swaggo/buffalo-swagger) +- [net/http](https://github.com/swaggo/http-swagger) +- [gorilla/mux](https://github.com/swaggo/http-swagger) +- [go-chi/chi](https://github.com/swaggo/http-swagger) +- [flamingo](https://github.com/i-love-flamingo/swagger) +- [fiber](https://github.com/gofiber/swagger) +- [atreugo](https://github.com/Nerzal/atreugo-swagger) +- [hertz](https://github.com/hertz-contrib/swagger) + +## Como utilizá-lo com Gin + +Encontrar o código fonte de exemplo [aqui](https://github.com/swaggo/swag/tree/master/example/celler). + +1. Depois de utilizar `swag init` para gerar os documentos Swagger 2.0, importar os seguintes pacotes: +```go +import "github.com/swaggo/gin-swagger" // gin-swagger middleware +import "github.com/swaggo/files" // swagger embed files +``` + +2. Adicionar [Informações Gerais API](#general-api-info) anotações em código `main.go`: + + +```go +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server celler server. 
+// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host localhost:8080 +// @BasePath /api/v1 + +// @securityDefinitions.basic BasicAuth + +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ +func main() { + r := gin.Default() + + c := controller.NewController() + + v1 := r.Group("/api/v1") + { + accounts := v1.Group("/accounts") + { + accounts.GET(":id", c.ShowAccount) + accounts.GET("", c.ListAccounts) + accounts.POST("", c.AddAccount) + accounts.DELETE(":id", c.DeleteAccount) + accounts.PATCH(":id", c.UpdateAccount) + accounts.POST(":id/images", c.UploadAccountImage) + } + //... + } + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + r.Run(":8080") +} +//... +``` + +Além disso, algumas informações API gerais podem ser definidas de forma dinâmica. O pacote de código gerado `docs` exporta a variável `SwaggerInfo` que podemos utilizar para definir programticamente o título, descrição, versão, hospedeiro e caminho base. Exemplo utilizando Gin: + +```go +package main + +import ( + "github.com/gin-gonic/gin" + "github.com/swaggo/files" + "github.com/swaggo/gin-swagger" + + "./docs" // docs is generated by Swag CLI, you have to import it. +) + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +func main() { + + // programmatically set swagger info + docs.SwaggerInfo.Title = "Swagger Example API" + docs.SwaggerInfo.Description = "This is a sample server Petstore server." 
+ docs.SwaggerInfo.Version = "1.0" + docs.SwaggerInfo.Host = "petstore.swagger.io" + docs.SwaggerInfo.BasePath = "/v2" + docs.SwaggerInfo.Schemes = []string{"http", "https"} + + r := gin.New() + + // use ginSwagger middleware to serve the API docs + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + + r.Run() +} +``` + +3. Adicionar [Operação API](#api-operacao) anotações em código `controller` + +```go +package controller + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/swaggo/swag/example/celler/httputil" + "github.com/swaggo/swag/example/celler/model" +) + +// ShowAccount godoc +// @Summary Show an account +// @Description get string by ID +// @Tags accounts +// @Accept json +// @Produce json +// @Param id path int true "Account ID" +// @Success 200 {object} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts/{id} [get] +func (c *Controller) ShowAccount(ctx *gin.Context) { + id := ctx.Param("id") + aid, err := strconv.Atoi(id) + if err != nil { + httputil.NewError(ctx, http.StatusBadRequest, err) + return + } + account, err := model.AccountOne(aid) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, account) +} + +// ListAccounts godoc +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { + q := ctx.Request.URL.Query().Get("q") + accounts, err := model.AccountsAll(q) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + 
ctx.JSON(http.StatusOK, accounts) +} +//... +``` + +```console +swag init +``` + +4. Execute a sua aplicação, e navegue para http://localhost:8080/swagger/index.html. Verá os documentos Swagger 2.0 Api, como mostrado abaixo: + +![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png) + +## O formatador de swag + +Os Swag Comments podem ser formatados automaticamente, assim como 'go fmt'. +Encontre o resultado da formatação [aqui](https://github.com/swaggo/swag/tree/master/example/celler). + +Usage: +```shell +swag fmt +``` + +Exclude folder: +```shell +swag fmt -d ./ --exclude ./internal +``` + +Ao utilizar `swag fmt`, é necessário assegurar-se de que tem um comentário doc para a função a fim de assegurar uma formatação correcta. +Isto deve-se ao `swag fmt` que traça comentários swag com separadores, o que só é permitido *após* um comentário doc padrão. + +Por exemplo, utilizar + +```go +// ListAccounts lists all existing accounts +// +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { +``` + +## Estado de Implementação + +[Documento Swagger 2.0](https://swagger.io/docs/specification/2-0/basic-structure/) + +- [x] Estrutura básica +- [x] Hospedeiro API e Caminho Base +- [x] Caminhos e operações +- [x] Descrição dos parâmetros +- [x] Descrever o corpo do pedido +- [x] Descrição das respostas +- [x] Tipos MIME +- [x] Autenticação + - [x] Autenticação básica + - [x] Chaves API +- [x] Acrescentar exemplos +- [x] Carregamento de ficheiros +- [x] Enums +- [x] Operações de Agrupamento com Etiquetas +- Extensões Swagger + +## Formato dos comentários 
declarativos + +## Informações Gerais API + +**Exemplo** +[celler/main.go](https://github.com/swaggo/swag/blob/master/example/celler/main.go) + +| anotação | descrição | exemplo | +|-------------|--------------------------------------------|---------------------------------| +| title | **Obrigatório.** O título da aplicação.| // @title Swagger Example API | +| version | **Obrigatório.** Fornece a versão da aplicação API.| // @version 1.0 | +| description | Uma breve descrição da candidatura. |// @descrição Este é um servidor servidor de celas de amostra. | +| tag.name | Nome de uma tag.| // @tag.name Este é o nome da tag | +| tag.description | Descrição da tag | // @tag.description Cool Description | +| tag.docs.url | Url da Documentação externa da tag | // @tag.docs.url https://example.com| +| tag.docs.description | Descrição da documentação externa da tag| // @tag.docs.description Melhor exemplo de documentação | +| TermsOfService | Os Termos de Serviço para o API.| // @termsOfService http://swagger.io/terms/ | +| contact.name | A informação de contacto para a API exposta.| // @contacto.name Suporte API | +| contact.url | O URL que aponta para as informações de contacto. DEVE estar no formato de um URL. | // @contact.url http://www.swagger.io/support| +| contact.email| O endereço de email da pessoa/organização de contacto. DEVE estar no formato de um endereço de correio electrónico.| // @contact.email support@swagger.io | +| license.name | **Obrigatório.** O nome da licença utilizada para a API.|// @licença.name Apache 2.0| +| license.url | Um URL para a licença utilizada para a API. DEVE estar no formato de um URL. | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html | +| host | O anfitrião (nome ou ip) que serve o API. | // @host localhost:8080 | +| BasePath | O caminho de base sobre o qual o API é servido. | // @BasePath /api/v1 | +| accept | Uma lista de tipos de MIME que os APIs podem consumir. 
Note que accept só afecta operações com um organismo de pedido, tais como POST, PUT e PATCH. O valor DEVE ser o descrito em [Tipos de Mime](#mime-types). | // @accept json | +| produce | Uma lista de tipos de MIME que os APIs podem produce. O valor DEVE ser o descrito em [Tipos de Mime](#mime-types). | // @produce json | +| query.collection.format | O formato padrão de param de colecção(array) em query,enums:csv,multi,pipes,tsv,ssv. Se não definido, csv é o padrão.| // @query.collection.format multi +| schemes | O protocolo de transferência para a operação que separou por espaços. | // @schemes http https | +| externalDocs.description | Descrição do documento externo. | // @externalDocs.description OpenAPI | +| externalDocs.url | URL do documento externo. | // @externalDocs.url https://swagger.io/resources/open-api/ | +| x-name | A chave de extensão, deve ser iniciada por x- e tomar apenas o valor json | // @x-example-key {"chave": "valor"} | + +### Usando descrições de remarcação para baixo +Quando uma pequena sequência na sua documentação é insuficiente, ou precisa de imagens, exemplos de códigos e coisas do género, pode querer usar descrições de marcação. Para utilizar as descrições markdown, utilize as seguintes anotações. + +| anotação | descrição | exemplo | +|-------------|--------------------------------------------|---------------------------------| +| title | **Obrigatório.** O título da aplicação.| // @title Swagger Example API | +| version | **Obrigatório.** Fornece a versão da aplicação API.| // @versão 1.0 | +| description.markdown | Uma breve descrição da candidatura. Parsed a partir do ficheiro api.md. Esta é uma alternativa a @description |// @description.markdown Sem valor necessário, isto analisa a descrição do ficheiro api.md |. +| tag.name | Nome de uma tag.| // @tag.name Este é o nome da tag | +| tag.description.markdown | Descrição da tag esta é uma alternativa à tag.description. 
A descrição será lida a partir de um ficheiro nomeado como tagname.md | // @tag.description.markdown | + +## Operação API + +**Exemplo** +[celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller) + +| anotação | descrição | +|-------------|----------------------------------------------------------------------------------------------------------------------------| +| descrição | Uma explicação verbosa do comportamento da operação. | +| description.markdown | Uma breve descrição da candidatura. A descrição será lida a partir de um ficheiro. Por exemplo, `@description.markdown details` irá carregar `details.md`| // @description.file endpoint.description.markdown | +| id | Um fio único utilizado para identificar a operação. Deve ser única entre todas as operações API. | +| tags | Uma lista de tags para cada operação API que separou por vírgulas. | +| summary | Um breve resumo do que a operação faz. | +| accept | Uma lista de tipos de MIME que os APIs podem consumir. Note que accept só afecta operações com um organismo de pedido, tais como POST, PUT e PATCH. O valor DEVE ser o descrito em [Tipos de Mime](#mime-types). | +| produce | Uma lista de tipos de MIME que os APIs podem produce. O valor DEVE ser o descrito em [Tipos de Mime](#mime-types). | +| param | Parâmetros que se separaram por espaços. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | +| security | [Segurança](#security) para cada operação API. | +| success | resposta de sucesso que separou por espaços. `return code or default`,`{param type}`,`data type`,`comment` |. +| failure | Resposta de falha que separou por espaços. `return code or default`,`{param type}`,`data type`,`comment` | +| response | Igual ao `sucesso` e `falha` | +| header | Cabeçalho em resposta que separou por espaços. `código de retorno`,`{tipo de parâmetro}`,`tipo de dados`,`comentário` |. +| router | Definição do caminho que separou por espaços. 
caminho",`path`,`[httpMethod]` |[httpMethod]` | +| x-name | A chave de extensão, deve ser iniciada por x- e tomar apenas o valor json. | +| x-codeSample | Optional Markdown use. tomar `file` como parâmetro. Isto irá então procurar um ficheiro nomeado como o resumo na pasta dada. | +| deprecated | Marcar o ponto final como depreciado. | + +## Mime Types + +`swag` aceita todos os tipos MIME que estão no formato correcto, ou seja, correspondem `*/*`. +Além disso, `swag` também aceita pseudónimos para alguns tipos de MIME, como se segue: + + +| Alias | MIME Type | +|-----------------------|-----------------------------------| +| json | application/json | +| xml | text/xml | +| plain | text/plain | +| html | text/html | +| mpfd | multipart/form-data | +| x-www-form-urlencoded | application/x-www-form-urlencoded | +| json-api | application/vnd.api+json | +| json-stream | application/x-json-stream | +| octet-stream | application/octet-stream | +| png | image/png | +| jpeg | image/jpeg | +| gif | image/gif | + + + +## Tipo de parâmetro + +- query +- path +- header +- body +- formData + +## Tipo de dados + +- string (string) +- integer (int, uint, uint32, uint64) +- number (float32) +- boolean (bool) +- file (param data type when uploading) +- user defined struct + +## Segurança +| anotação | descrição | parâmetros | exemplo | +|------------|-------------|------------|---------| +| securitydefinitions.basic | [Basic](https://swagger.io/docs/specification/2-0/authentication/basic-authentication/) auth. | | // @securityDefinitions.basicAuth | [Básico]() +| securitydefinitions.apikey | [chave API](https://swagger.io/docs/specification/2-0/authentication/api-keys/) auth. | in, name, description | // @securityDefinitions.apikey ApiKeyAuth | +| securitydefinitions.oauth2.application | [Aplicação OAuth2](https://swagger.io/docs/specification/authentication/oauth2/) auth. 
| tokenUrl, scope, description | // @securitydefinitions.oauth2.application OAuth2Application | +| securitydefinitions.oauth2.implicit | [OAuth2 implicit](https://swagger.io/docs/specification/authentication/oauth2/) auth. | authorizationUrl, scope, description | // @securitydefinitions.oauth2.implicit OAuth2Implicit | [OAuth2Implicit]() +| securitydefinitions.oauth2.password | [OAuth2 password](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope, description | // @securitydefinitions.oauth2.password OAuth2Password | +| securitydefinitions.oauth2.accessCode | [código de acesso OAuth2](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, authorizationUrl, scope, description | // @securitydefinitions.oauth2.accessCode OAuth2AccessCode | [código de acesso OAuth2.accessCode]() + + +| anotação de parâmetros | exemplo | +|---------------------------------|-------------------------------------------------------------------------| +| in | // @in header | +| name | // @name Authorization | +| tokenUrl | // @tokenUrl https://example.com/oauth/token | +| authorizationurl | // @authorizationurl https://example.com/oauth/authorize | +| scope.hoge | // @scope.write Grants write access | +| description | // @descrição OAuth protege os pontos finais da nossa entidade | + +## Atributo + +```go +// @Param enumstring query string false "string enums" Enums(A, B, C) +// @Param enumint query int false "int enums" Enums(1, 2, 3) +// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) +// @Param string query string false "string valid" minlength(5) maxlength(10) +// @Param int query int false "int valid" minimum(1) maximum(10) +// @Param default query string false "string default" default(A) +// @Param example query string false "string example" example(string) +// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param extensions query []string false "string collection" 
extensions(x-example=test,x-nullable) +``` + +It also works for the struct fields: + +```go +type Foo struct { + Bar string `minLength:"4" maxLength:"16" example:"random string"` + Baz int `minimum:"10" maximum:"20" default:"15"` + Qux []string `enums:"foo,bar,baz"` +} +``` + +### Disponível + +Nome do campo | Tipo | Descrição +---|:---:|--- +validate | `string` | Determina a validação para o parâmetro. Os valores possíveis são: `required,optional`. +default | * | Declara o valor do parâmetro que o servidor utilizará se nenhum for fornecido, por exemplo, uma "contagem" para controlar o número de resultados por página poderá ser por defeito de 100 se não for fornecido pelo cliente no pedido. (Nota: "por defeito" não tem significado para os parâmetros requeridos). +See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Ao contrário do esquema JSON, este valor DEVE estar em conformidade com o definido [`type`](#parameterType) para este parâmetro. +maximum | `number` | Ver https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2. +minimum | `number` | Ver https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3. +multipleOf | `number` | Ver https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1. +maxLength | `integer` | Ver https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1. +minLength | `integer` | Ver https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2. +enums | [\*] | Ver https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1. +format | `string` | O formato de extensão para o anteriormente mencionado [`type`](#parameterType). Ver [Data Type Formats](https://swagger.io/specification/v2/#dataTypeFormat) para mais detalhes. +collectionFormat | `string` |Determina o formato da matriz se for utilizada uma matriz de tipos. Os valores possíveis são:
  • `csv` - valores separados por vírgulas `foo,bar`.
  • `ssv` - valores separados por espaço `foo bar`.
  • `tsv` - valores separados por tabulação `foo\tbar`.
  • `pipes` - valores separados por tubo foo|bar.
  • `multi` - corresponde a múltiplas instâncias de parâmetros em vez de múltiplos valores para uma única instância `foo=bar&foo=baz`. This is valid only for parameters [`in`](#parameterIn) "query" or "formData".
Default value is `csv`. +example | * | Declara o exemplo para o valor do parâmetro +extensions | `string` | Acrescentar extensão aos parâmetros. + +### Futuro + +Nome do campo | Tipo | Description +---|:---:|--- +pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. +maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2. +minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3. +uniqueItems | `boolean` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.4. + +## Exemplos + + +### Descrições em múltiplas linhas + +É possível acrescentar descrições que abranjam várias linhas tanto na descrição geral da api como em definições de rotas como esta: + +```go +// @description This is the first line +// @description This is the second line +// @description And so forth. +``` + +### Estrutura definida pelo utilizador com um tipo de matriz + +```go +// @Success 200 {array} model.Account <-- This is a user defined struct. +``` + +```go +package model + +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` +} +``` + + +### Declaração de estruturação de funções + +Pode declarar as estruturas de resposta do seu pedido dentro de um corpo funcional. +Deve ter de seguir a convenção de nomeação +`.. `. 
+ +```go +package main + +// @Param request body main.MyHandler.request true "query params" +// @Success 200 {object} main.MyHandler.response +// @Router /test [post] +func MyHandler() { + type request struct { + RequestField string + } + + type response struct { + ResponseField string + } +} +``` + + +### Composição do modelo em resposta +```go +// JSONResult's data field will be overridden by the specific type proto.Order +@success 200 {object} jsonresult.JSONResult{data=proto.Order} "desc" +``` + +```go +type JSONResult struct { + Code int `json:"code" ` + Message string `json:"message"` + Data interface{} `json:"data"` +} + +type Order struct { //in `proto` package + Id uint `json:"id"` + Data interface{} `json:"data"` +} +``` + +- também suportam uma variedade de objectos e tipos primitivos como resposta aninhada +```go +@success 200 {object} jsonresult.JSONResult{data=[]proto.Order} "desc" +@success 200 {object} jsonresult.JSONResult{data=string} "desc" +@success 200 {object} jsonresult.JSONResult{data=[]string} "desc" +``` + +- campos múltiplos que se sobrepõem. campo será adicionado se não existir +```go +@success 200 {object} jsonresult.JSONResult{data1=string,data2=[]string,data3=proto.Order,data4=[]proto.Order} "desc" +``` +- overriding deep-level fields +```go +type DeepObject struct { //in `proto` package + ... +} +@success 200 {object} jsonresult.JSONResult{data1=proto.Order{data=proto.DeepObject},data2=[]proto.Order{data=[]proto.DeepObject}} "desc" +``` + +### Adicionar um cabeçalho em resposta + +```go +// @Success 200 {string} string "ok" +// @failure 400 {string} string "error" +// @response default {string} string "other error" +// @Header 200 {string} Location "/entity/1" +// @Header 200,400,default {string} Token "token" +// @Header all {string} Token2 "token2" +``` + + +### Utilizar parâmetros de caminhos múltiplos + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param account_id path int true "Account ID" +// ... 
+// @Router /examples/groups/{group_id}/accounts/{account_id} [get] +``` + +### Adicionar múltiplos caminhos + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param user_id path int true "User ID" +// ... +// @Router /examples/groups/{group_id}/user/{user_id}/address [put] +// @Router /examples/user/{user_id}/address [put] +``` + +### Exemplo de valor de estrutura + +```go +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` + PhotoUrls []string `json:"photo_urls" example:"http://test/image/1.jpg,http://test/image/2.jpg"` +} +``` + +### Schema Exemplo do corpo + +```go +// @Param email body string true "message/rfc822" SchemaExample(Subject: Testmail\r\n\r\nBody Message\r\n) +``` + +### Descrição da estrutura + +```go +// Account model info +// @Description User account information +// @Description with user id and username +type Account struct { + // ID this is userid + ID int `json:"id"` + Name string `json:"name"` // This is Name +} +``` + +[#708](https://github.com/swaggo/swag/issues/708) O analisador trata apenas de comentários estruturais a partir de `@Description` attribute. + +Assim, gerou o doc. 
de swagger como se segue: +```json +"Account": { + "type":"object", + "description": "User account information with user id and username" + "properties": { + "id": { + "type": "integer", + "description": "ID this is userid" + }, + "name": { + "type":"string", + "description": "This is Name" + } + } +} +``` + +### Usar etiqueta do tipo swaggertype para suportar o tipo personalizado +[#201](https://github.com/swaggo/swag/issues/201#issuecomment-475479409) + +```go +type TimestampTime struct { + time.Time +} + +///implement encoding.JSON.Marshaler interface +func (t *TimestampTime) MarshalJSON() ([]byte, error) { + bin := make([]byte, 16) + bin = strconv.AppendInt(bin[:0], t.Time.Unix(), 10) + return bin, nil +} + +func (t *TimestampTime) UnmarshalJSON(bin []byte) error { + v, err := strconv.ParseInt(string(bin), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(v, 0) + return nil +} +/// + +type Account struct { + // Override primitive type by simply specifying it via `swaggertype` tag + ID sql.NullInt64 `json:"id" swaggertype:"integer"` + + // Override struct type to a primitive type 'integer' by specifying it via `swaggertype` tag + RegisterTime TimestampTime `json:"register_time" swaggertype:"primitive,integer"` + + // Array types can be overridden using "array," format + Coeffs []big.Float `json:"coeffs" swaggertype:"array,number"` +} +``` + +[#379](https://github.com/swaggo/swag/issues/379) +```go +type CerticateKeyPair struct { + Crt []byte `json:"crt" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` + Key []byte `json:"key" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` +} +``` +generated swagger doc as follows: +```go +"api.MyBinding": { + "type":"object", + "properties":{ + "crt":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + }, + "key":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + } + } +} + +``` + +### Utilizar anulações globais para 
suportar um tipo personalizado + +Se estiver a utilizar ficheiros gerados, as etiquetas [`swaggertype`](#use-swaggertype-tag-to-supported-custom-type) ou `swaggerignore` podem não ser possíveis. + +Ao passar um mapeamento para swag com `--overridesFile` pode dizer swag para utilizar um tipo no lugar de outro onde quer que apareça. Por defeito, se um ficheiro `.swaggo` estiver presente no directório actual, será utilizado. + +Go code: +```go +type MyStruct struct { + ID sql.NullInt64 `json:"id"` + Name sql.NullString `json:"name"` +} +``` + +`.swaggo`: +``` +// Substituir todos os NullInt64 por int +replace database/sql.NullInt64 int + +// Não inclua quaisquer campos do tipo base de database/sql. +NullString no swagger docs +skip database/sql.NullString +``` + +As directivas possíveis são comentários (começando por `//`), `replace path/to/a.type path/to/b.type`, e `skip path/to/a.type`. + +(Note que os caminhos completos para qualquer tipo nomeado devem ser fornecidos para evitar problemas quando vários pacotes definem um tipo com o mesmo nome) + +Entregue em: +```go +"types.MyStruct": { + "id": "integer" +} + +### Use swaggerignore tag para excluir um campo + +```go +type Account struct { + ID string `json:"id"` + Name string `json:"name"` + Ignored int `swaggerignore:"true"` +} +``` + + +### Adicionar informações de extensão ao campo de estruturação + +```go +type Account struct { + ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // extensions fields must start with "x-" +} +``` + +gerar doc. de swagger como se segue: + +```go +"Account": { + "type": "object", + "properties": { + "id": { + "type": "string", + "x-nullable": true, + "x-abc": "def", + "x-omitempty": false + } + } +} +``` + + +### Renomear modelo a expor + +```golang +type Resp struct { + Code int +}//@name Response +``` + +### Como utilizar as anotações de segurança + +Informações API gerais. 
+ +```go +// @securityDefinitions.basic BasicAuth + +// @securitydefinitions.oauth2.application OAuth2Application +// @tokenUrl https://example.com/oauth/token +// @scope.write Grants write access +// @scope.admin Grants read and write access to administrative information +``` + +Cada operação API. + +```go +// @Security ApiKeyAuth +``` + +Faça-o AND condicione-o + +```go +// @Security ApiKeyAuth +// @Security OAuth2Application[write, admin] +``` + +Faça-o OR condição + +```go +// @Security ApiKeyAuth || firebase +// @Security OAuth2Application[write, admin] || APIKeyAuth +``` + + + +### Adicionar uma descrição para enumerar artigos + +```go +type Example struct { + // Sort order: + // * asc - Ascending, from A to Z. + // * desc - Descending, from Z to A. + Order string `enums:"asc,desc"` +} +``` + +### Gerar apenas tipos de ficheiros de documentos específicos + +Por defeito, o comando `swag` gera especificação Swagger em três tipos diferentes de ficheiros/arquivos: +- docs.go +- swagger.json +- swagger.yaml + +Se desejar limitar um conjunto de tipos de ficheiros que devem ser gerados pode utilizar a bandeira `--outputTypes` (short `-ot`). O valor por defeito é `go,json,yaml` - tipos de saída separados por vírgula. Para limitar a saída apenas a ficheiros `go` e `yaml`, escrever-se-ia `go,yaml'. Com comando completo que seria `swag init --outputTypes go,yaml`. 
+ +### Como usar tipos genéricos + +```go +// @Success 200 {object} web.GenericNestedResponse[types.Post] +// @Success 204 {object} web.GenericNestedResponse[types.Post, Types.AnotherOne] +// @Success 201 {object} web.GenericNestedResponse[web.GenericInnerType[types.Post]] +func GetPosts(w http.ResponseWriter, r *http.Request) { + _ = web.GenericNestedResponse[types.Post]{} +} +``` +Para mais detalhes e outros exemplos, veja [esse arquivo](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) + +### Alterar os delimitadores de acção padrão Go Template +[#980](https://github.com/swaggo/swag/issues/980) +[#1177](https://github.com/swaggo/swag/issues/1177) + +Se as suas anotações ou campos estruturantes contêm "{{" or "}}", a geração de modelos irá muito provavelmente falhar, uma vez que estes são os delimitadores por defeito para [go templates](https://pkg.go.dev/text/template#Template.Delims). + +Para que a geração funcione correctamente, pode alterar os delimitadores por defeito com `-td'. Por exemplo: +``console +swag init -g http/api.go -td "[[,]" +``` + +O novo delimitador é um fio com o formato "``,``". + +## Sobre o projecto +Este projecto foi inspirado por [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) mas simplificámos a utilização e acrescentámos apoio a uma variedade de [frameworks web](#estruturas-web-suportadas). A fonte de imagem Gopher é [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers). Tem licenças [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en). + +## Contribuidores + +Este projecto existe graças a todas as pessoas que contribuem. [[Contribute](CONTRIBUTING.md)]. + + + +## Apoios + +Obrigado a todos os nossos apoiantes! 🙏 [[Become a backer](https://opencollective.com/swag#backer)] + + + + +## Patrocinadores + +Apoiar este projecto tornando-se um patrocinador. O seu logótipo aparecerá aqui com um link para o seu website. 
[[Become a sponsor](https://opencollective.com/swag#sponsor)] + + + + + + + + + + + + + + +## Licença +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_large) diff --git a/vendor/github.com/swaggo/swag/v2/README_zh-CN.md b/vendor/github.com/swaggo/swag/v2/README_zh-CN.md new file mode 100644 index 00000000..24a219c9 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/README_zh-CN.md @@ -0,0 +1,771 @@ +# swag + +🌍 *[English](README.md) ∙ [简体中文](README_zh-CN.md)* + + + +[![Travis Status](https://img.shields.io/travis/swaggo/swag/master.svg)](https://travis-ci.org/swaggo/swag) +[![Coverage Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag) +[![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag) +[![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master) +[![Go Doc](https://godoc.org/github.com/swaggo/swagg?status.svg)](https://godoc.org/github.com/swaggo/swag) +[![Backers on Open Collective](https://opencollective.com/swag/backers/badge.svg)](#backers) +[![Sponsors on Open Collective](https://opencollective.com/swag/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_shield) +[![Release](https://img.shields.io/github/release/swaggo/swag.svg?style=flat-square)](https://github.com/swaggo/swag/releases) + +Swag将Go的注释转换为Swagger2.0文档。我们为流行的 [Go Web Framework](#支持的Web框架) 创建了各种插件,这样可以与现有Go项目快速集成(使用Swagger UI)。 + +## 目录 + +- [快速开始](#快速开始) +- [支持的Web框架](#支持的web框架) +- [如何与Gin集成](#如何与gin集成) +- [格式化说明](#格式化说明) +- [开发现状](#开发现状) +- [声明式注释格式](#声明式注释格式) + - [通用API信息](#通用api信息) + - 
[API操作](#api操作) + - [安全性](#安全性) +- [样例](#样例) + - [多行的描述](#多行的描述) + - [用户自定义的具有数组类型的结构](#用户自定义的具有数组类型的结构) + - [响应对象中的模型组合](#响应对象中的模型组合) + - [在响应中增加头字段](#在响应中增加头字段) + - [使用多路径参数](#使用多路径参数) + - [结构体的示例值](#结构体的示例值) + - [结构体描述](#结构体描述) + - [使用`swaggertype`标签更改字段类型](#使用`swaggertype`标签更改字段类型) + - [使用`swaggerignore`标签排除字段](#使用swaggerignore标签排除字段) + - [将扩展信息添加到结构字段](#将扩展信息添加到结构字段) + - [对展示的模型重命名](#对展示的模型重命名) + - [如何使用安全性注释](#如何使用安全性注释) +- [项目相关](#项目相关) + +## 快速开始 + +1. 将注释添加到API源代码中,请参阅声明性注释格式。 +2. 使用如下命令下载swag: + +```bash +go install github.com/swaggo/swag/cmd/swag@latest +``` + +从源码开始构建的话,需要有Go环境(1.19及以上版本)。 + +或者从github的release页面下载预编译好的二进制文件。 + +3. 在包含`main.go`文件的项目根目录运行`swag init`。这将会解析注释并生成需要的文件(`docs`文件夹和`docs/docs.go`)。 + +```bash +swag init +``` + +确保导入了生成的`docs/docs.go`文件,这样特定的配置文件才会被初始化。如果通用API注释没有写在`main.go`中,可以使用`-g`标识符来告知swag。 + +```bash +swag init -g http/api.go +``` + +4. (可选) 使用`fmt`格式化 SWAG 注释。(请先升级到最新版本) + +```bash +swag fmt +``` + +## swag cli + +```bash +swag init -h +NAME: + swag init - Create docs.go + +USAGE: + swag init [command options] [arguments...] 
+ +OPTIONS: + --generalInfo value, -g value API通用信息所在的go源文件路径,如果是相对路径则基于API解析目录 (默认: "main.go") + --dir value, -d value API解析目录 (默认: "./") + --exclude value 解析扫描时排除的目录,多个目录可用逗号分隔(默认:空) + --propertyStrategy value, -p value 结构体字段命名规则,三种:snakecase,camelcase,pascalcase (默认: "camelcase") + --output value, -o value 文件(swagger.json, swagger.yaml and doc.go)输出目录 (默认: "./docs") + --parseVendor 是否解析vendor目录里的go源文件,默认不 + --parseDependency 是否解析依赖目录中的go源文件,默认不 + --parseDependencyLevel, --pdl 对'--parseDependency'参数进行增强, 是否解析依赖目录中的go源文件, 0 不解析, 1 只解析对象模型, 2 只解析API, 3 对象模型和API都解析 (default: 0) + --markdownFiles value, --md value 指定API的描述信息所使用的markdown文件所在的目录 + --generatedTime 是否输出时间到输出文件docs.go的顶部,默认是 + --codeExampleFiles value, --cef value 解析包含用于 x-codeSamples 扩展的代码示例文件的文件夹,默认禁用 + --parseInternal 解析 internal 包中的go文件,默认禁用 + --parseDepth value 依赖解析深度 (默认: 100) + --instanceName value 设置文档实例名 (默认: "swagger") +``` + +```bash +swag fmt -h +NAME: + swag fmt - format swag comments + +USAGE: + swag fmt [command options] [arguments...] + +OPTIONS: + --dir value, -d value API解析目录 (默认: "./") + --exclude value 解析扫描时排除的目录,多个目录可用逗号分隔(默认:空) + --generalInfo value, -g value API通用信息所在的go源文件路径,如果是相对路径则基于API解析目录 (默认: "main.go") + --help, -h show help (default: false) + +``` + +## 支持的Web框架 + +- [gin](http://github.com/swaggo/gin-swagger) +- [echo](http://github.com/swaggo/echo-swagger) +- [buffalo](https://github.com/swaggo/buffalo-swagger) +- [net/http](https://github.com/swaggo/http-swagger) +- [gorilla/mux](https://github.com/swaggo/http-swagger) +- [go-chi/chi](https://github.com/swaggo/http-swagger) +- [flamingo](https://github.com/i-love-flamingo/swagger) +- [fiber](https://github.com/gofiber/swagger) +- [atreugo](https://github.com/Nerzal/atreugo-swagger) +- [hertz](https://github.com/hertz-contrib/swagger) + +## 如何与Gin集成 + +[点击此处](https://github.com/swaggo/swag/tree/master/example/celler)查看示例源代码。 + +1. 
使用`swag init`生成Swagger2.0文档后,导入如下代码包: + +```go +import "github.com/swaggo/gin-swagger" // gin-swagger middleware +import "github.com/swaggo/files" // swagger embed files +``` + +2. 在`main.go`源代码中添加通用的API注释: + +```go +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server celler server. +// @termsOfService http://swagger.io/terms/ + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html + +// @host localhost:8080 +// @BasePath /api/v1 + +// @securityDefinitions.basic BasicAuth + +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ +func main() { + r := gin.Default() + + c := controller.NewController() + + v1 := r.Group("/api/v1") + { + accounts := v1.Group("/accounts") + { + accounts.GET(":id", c.ShowAccount) + accounts.GET("", c.ListAccounts) + accounts.POST("", c.AddAccount) + accounts.DELETE(":id", c.DeleteAccount) + accounts.PATCH(":id", c.UpdateAccount) + accounts.POST(":id/images", c.UploadAccountImage) + } + //... + } + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + r.Run(":8080") +} +//... +``` + +此外,可以动态设置一些通用的API信息。生成的代码包`docs`导出`SwaggerInfo`变量,使用该变量可以通过编码的方式设置标题、描述、版本、主机和基础路径。使用Gin的示例: + +```go +package main + +import ( + "github.com/gin-gonic/gin" + "github.com/swaggo/files" + "github.com/swaggo/gin-swagger" + + "./docs" // docs is generated by Swag CLI, you have to import it. +) + +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io + +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +func main() { + + // programatically set swagger info + docs.SwaggerInfo.Title = "Swagger Example API" + docs.SwaggerInfo.Description = "This is a sample server Petstore server." 
+ docs.SwaggerInfo.Version = "1.0" + docs.SwaggerInfo.Host = "petstore.swagger.io" + docs.SwaggerInfo.BasePath = "/v2" + docs.SwaggerInfo.Schemes = []string{"http", "https"} + + r := gin.New() + + // use ginSwagger middleware to serve the API docs + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + + r.Run() +} +``` + +3. 在`controller`代码中添加API操作注释: + +```go +package controller + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/swaggo/swag/example/celler/httputil" + "github.com/swaggo/swag/example/celler/model" +) + +// ShowAccount godoc +// @Summary Show an account +// @Description get string by ID +// @Tags accounts +// @Accept json +// @Produce json +// @Param id path int true "Account ID" +// @Success 200 {object} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts/{id} [get] +func (c *Controller) ShowAccount(ctx *gin.Context) { + id := ctx.Param("id") + aid, err := strconv.Atoi(id) + if err != nil { + httputil.NewError(ctx, http.StatusBadRequest, err) + return + } + account, err := model.AccountOne(aid) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, account) +} + +// ListAccounts godoc +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] +func (c *Controller) ListAccounts(ctx *gin.Context) { + q := ctx.Request.URL.Query().Get("q") + accounts, err := model.AccountsAll(q) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, accounts) +} +//... 
+``` + +```bash +swag init +``` + +4. 运行程序,然后在浏览器中访问 http://localhost:8080/swagger/index.html 。将看到Swagger 2.0 Api文档,如下所示: + +![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png) + +## 格式化说明 + +可以针对Swag的注释自动格式化,就像`go fmt`。 +此处查看格式化结果 [here](https://github.com/swaggo/swag/tree/master/example/celler). + +示例: +```shell +swag fmt +``` + +排除目录(不扫描)示例: +```shell +swag fmt -d ./ --exclude ./internal +``` + +## 开发现状 + +[Swagger 2.0 文档](https://swagger.io/docs/specification/2-0/basic-structure/) + +- [x] Basic Structure +- [x] API Host and Base Path +- [x] Paths and Operations +- [x] Describing Parameters +- [x] Describing Request Body +- [x] Describing Responses +- [x] MIME Types +- [x] Authentication + - [x] Basic Authentication + - [x] API Keys +- [x] Adding Examples +- [x] File Upload +- [x] Enums +- [x] Grouping Operations With Tags +- [ ] Swagger Extensions + +## 声明式注释格式 + +## 通用API信息 + +**示例** [`celler/main.go`](https://github.com/swaggo/swag/blob/master/example/celler/main.go) + +| 注释 | 说明 | 示例 | +| ----------------------- | ----------------------------------------------------------------------------------------------- | --------------------------------------------------------------- | +| title | **必填** 应用程序的名称。 | // @title Swagger Example API | +| version | **必填** 提供应用程序API的版本。 | // @version 1.0 | +| description | 应用程序的简短描述。 | // @description This is a sample server celler server. 
| +| tag.name | 标签的名称。 | // @tag.name This is the name of the tag | +| tag.description | 标签的描述。 | // @tag.description Cool Description | +| tag.docs.url | 标签的外部文档的URL。 | // @tag.docs.url https://example.com | +| tag.docs.description | 标签的外部文档说明。 | // @tag.docs.description Best example documentation | +| termsOfService | API的服务条款。 | // @termsOfService http://swagger.io/terms/ | +| contact.name | 公开的API的联系信息。 | // @contact.name API Support | +| contact.url | 联系信息的URL。 必须采用网址格式。 | // @contact.url http://www.swagger.io/support | +| contact.email | 联系人/组织的电子邮件地址。 必须采用电子邮件地址的格式。 | // @contact.email support@swagger.io | +| license.name | **必填** 用于API的许可证名称。 | // @license.name Apache 2.0 | +| license.url | 用于API的许可证的URL。 必须采用网址格式。 | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html | +| host | 运行API的主机(主机名或IP地址)。 | // @host localhost:8080 | +| BasePath | 运行API的基本路径。 | // @BasePath /api/v1 | +| accept | API 可以使用的 MIME 类型列表。 请注意,Accept 仅影响具有请求正文的操作,例如 POST、PUT 和 PATCH。 值必须如“[Mime类型](#mime类型)”中所述。 | // @accept json | +| produce | API可以生成的MIME类型的列表。值必须如“[Mime类型](#mime类型)”中所述。 | // @produce json | +| query.collection.format | 请求URI query里数组参数的默认格式:csv,multi,pipes,tsv,ssv。 如果未设置,则默认为csv。 | // @query.collection.format multi | +| schemes | 用空格分隔的请求的传输协议。 | // @schemes http https | +| externalDocs.description | Description of the external document. | // @externalDocs.description OpenAPI | +| externalDocs.url | URL of the external document. 
| // @externalDocs.url https://swagger.io/resources/open-api/ | +| x-name | 扩展的键必须以x-开头,并且只能使用json值 | // @x-example-key {"key": "value"} | + +### 使用Markdown描述 + +如果文档中的短字符串不足以完整表达,或者需要展示图片,代码示例等类似的内容,则可能需要使用Markdown描述。要使用Markdown描述,请使用一下注释。 + +| 注释 | 说明 | 示例 | +| ------------------------ | ------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------- | +| title | **必填** 应用程序的名称。 | // @title Swagger Example API | +| version | **必填** 提供应用程序API的版本。 | // @version 1.0 | +| description.markdown | 应用程序的简短描述。 从`api.md`文件中解析。 这是`@description`的替代用法。 | // @description.markdown No value needed, this parses the description from api.md | +| tag.name | 标签的名称。 | // @tag.name This is the name of the tag | +| tag.description.markdown | 标签说明,这是`tag.description`的替代用法。 该描述将从名为`tagname.md的`文件中读取。 | // @tag.description.markdown | + +## API操作 + +Example [celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller) + +| 注释 | 描述 | +|----------------------|------------------------------------------------------------------------------------------------| +| description | 操作行为的详细说明。 | +| description.markdown | 应用程序的简短描述。该描述将从名为`endpointname.md`的文件中读取。 | +| id | 用于标识操作的唯一字符串。在所有API操作中必须唯一。 | +| tags | 每个API操作的标签列表,以逗号分隔。 | +| summary | 该操作的简短摘要。 | +| accept | API 可以使用的 MIME 类型列表。 请注意,Accept 仅影响具有请求正文的操作,例如 POST、PUT 和 PATCH。 值必须如“[Mime类型](#mime类型)”中所述。 | +| produce | API可以生成的MIME类型的列表。值必须如“[Mime类型](#mime类型)”中所述。 | +| param | 用空格分隔的参数。`param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | +| security | 每个API操作的[安全性](#安全性)。 | +| success | 以空格分隔的成功响应。`return code`,`{param type}`,`data type`,`comment` | +| failure | 以空格分隔的故障响应。`return code`,`{param type}`,`data type`,`comment` | +| response | 与success、failure作用相同 | +| header | 以空格分隔的头字段。 `return code`,`{param type}`,`data type`,`comment` | +| router | 以空格分隔的路径定义。 `path`,`[httpMethod]` 
| +| deprecatedrouter | 与router相同,但是是deprecated的。 | +| x-name | 扩展字段必须以`x-`开头,并且只能使用json值。 | +| deprecated | 将当前API操作的所有路径设置为deprecated | + +## Mime类型 + +`swag` 接受所有格式正确的MIME类型, 即使匹配 `*/*`。除此之外,`swag`还接受某些MIME类型的别名,如下所示: + +| Alias | MIME Type | +| --------------------- | --------------------------------- | +| json | application/json | +| xml | text/xml | +| plain | text/plain | +| html | text/html | +| mpfd | multipart/form-data | +| x-www-form-urlencoded | application/x-www-form-urlencoded | +| json-api | application/vnd.api+json | +| json-stream | application/x-json-stream | +| octet-stream | application/octet-stream | +| png | image/png | +| jpeg | image/jpeg | +| gif | image/gif | + +## 参数类型 + +- query +- path +- header +- body +- formData + +## 数据类型 + +- string (string) +- integer (int, uint, uint32, uint64) +- number (float32) +- boolean (bool) +- user defined struct + +## 安全性 + +| 注释 | 描述 | 参数 | 示例 | +| -------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------- | ------------------------------------------------------------ | +| securitydefinitions.basic | [Basic](https://swagger.io/docs/specification/2-0/authentication/basic-authentication/) auth. | | // @securityDefinitions.basic BasicAuth | +| securitydefinitions.apikey | [API key](https://swagger.io/docs/specification/2-0/authentication/api-keys/) auth. | in, name | // @securityDefinitions.apikey ApiKeyAuth | +| securitydefinitions.oauth2.application | [OAuth2 application](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope | // @securitydefinitions.oauth2.application OAuth2Application | +| securitydefinitions.oauth2.implicit | [OAuth2 implicit](https://swagger.io/docs/specification/authentication/oauth2/) auth. 
| authorizationUrl, scope | // @securitydefinitions.oauth2.implicit OAuth2Implicit | +| securitydefinitions.oauth2.password | [OAuth2 password](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, scope | // @securitydefinitions.oauth2.password OAuth2Password | +| securitydefinitions.oauth2.accessCode | [OAuth2 access code](https://swagger.io/docs/specification/authentication/oauth2/) auth. | tokenUrl, authorizationUrl, scope | // @securitydefinitions.oauth2.accessCode OAuth2AccessCode | + +| 参数注释 | 示例 | +| ---------------- | -------------------------------------------------------- | +| in | // @in header | +| name | // @name Authorization | +| tokenUrl | // @tokenUrl https://example.com/oauth/token | +| authorizationurl | // @authorizationurl https://example.com/oauth/authorize | +| scope.hoge | // @scope.write Grants write access | + +## 属性 + +```go +// @Param enumstring query string false "string enums" Enums(A, B, C) +// @Param enumint query int false "int enums" Enums(1, 2, 3) +// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) +// @Param string query string false "string valid" minlength(5) maxlength(10) +// @Param int query int false "int valid" minimum(1) maximum(10) +// @Param default query string false "string default" default(A) +// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable) +``` + +也适用于结构体字段: + +```go +type Foo struct { + Bar string `minLength:"4" maxLength:"16"` + Baz int `minimum:"10" maximum:"20" default:"15"` + Qux []string `enums:"foo,bar,baz"` +} +``` + +### 当前可用的 + +| 字段名 | 类型 | 描述 | +| ---------------- | --------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| default | * | 声明如果未提供任何参数,则服务器将使用的默认参数值,例如,如果请求中的客户端未提供该参数,则用于控制每页结果数的“计数”可能默认为100。 (注意:“default”对于必需的参数没有意义)。参看 https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2。 与JSON模式不同,此值务必符合此参数的定义[类型](#parameterType)。 | +| maximum | `number` | 参看 https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2. | +| minimum | `number` | 参看 https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3. | +| maxLength | `integer` | 参看 https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1. | +| minLength | `integer` | 参看 https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2. | +| enums | [\*] | 参看 https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1. | +| format | `string` | 上面提到的[类型](#parameterType)的扩展格式。有关更多详细信息,请参见[数据类型格式](https://swagger.io/specification/v2/#dataTypeFormat)。 | +| collectionFormat | `string` | 指定query数组参数的格式。 可能的值为:
  • `csv` - 逗号分隔值 `foo,bar`.
  • `ssv` - 空格分隔值 `foo bar`.
  • `tsv` - 制表符分隔值 `foo\tbar`.
  • `pipes` - 管道符分隔值 foo|bar.
  • `multi` - 对应于多个参数实例,而不是单个实例 `foo=bar&foo=baz` 的多个值。这仅对“`query`”或“`formData`”中的参数有效。
默认值是 `csv`。 | + +### 进一步的 + +| 字段名 | 类型 | 描述 | +| ----------- | :-------: | ---------------------------------------------------------------------------------- | +| multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1. | +| pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. | +| maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2. | +| minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3. | +| uniqueItems | `boolean` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.4. | + +## 样例 + +### 多行的描述 + +可以在常规api描述或路由定义中添加跨越多行的描述,如下所示: + +```go +// @description This is the first line +// @description This is the second line +// @description And so forth. +``` + +### 用户自定义的具有数组类型的结构 + +```go +// @Success 200 {array} model.Account <-- This is a user defined struct. +``` + +```go +package model + +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` +} +``` + +### 响应对象中的模型组合 + +```go +// JSONResult的data字段类型将被proto.Order类型替换 +@success 200 {object} jsonresult.JSONResult{data=proto.Order} "desc" +``` + +```go +type JSONResult struct { + Code int `json:"code" ` + Message string `json:"message"` + Data interface{} `json:"data"` +} + +type Order struct { //in `proto` package + ... 
+} +``` + +- 还支持对象数组和原始类型作为嵌套响应 + +```go +@success 200 {object} jsonresult.JSONResult{data=[]proto.Order} "desc" +@success 200 {object} jsonresult.JSONResult{data=string} "desc" +@success 200 {object} jsonresult.JSONResult{data=[]string} "desc" +``` + +- 替换多个字段的类型。如果某字段不存在,将添加该字段。 + +```go +@success 200 {object} jsonresult.JSONResult{data1=string,data2=[]string,data3=proto.Order,data4=[]proto.Order} "desc" +``` + +### 在响应中增加头字段 + +```go +// @Success 200 {string} string "ok" +// @failure 400 {string} string "error" +// @response default {string} string "other error" +// @Header 200 {string} Location "/entity/1" +// @Header 200,400,default {string} Token "token" +// @Header all {string} Token2 "token2" +``` + +### 使用多路径参数 + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param account_id path int true "Account ID" +// ... +// @Router /examples/groups/{group_id}/accounts/{account_id} [get] +``` + +### 结构体的示例值 + +```go +type Account struct { + ID int `json:"id" example:"1"` + Name string `json:"name" example:"account name"` + PhotoUrls []string `json:"photo_urls" example:"http://test/image/1.jpg,http://test/image/2.jpg"` +} +``` + +### 结构体描述 + +```go +type Account struct { + // ID this is userid + ID int `json:"id"` + Name string `json:"name"` // This is Name +} +``` + +### 使用`swaggertype`标签更改字段类型 + +[#201](https://github.com/swaggo/swag/issues/201#issuecomment-475479409) + +```go +type TimestampTime struct { + time.Time +} + +///实现encoding.JSON.Marshaler接口 +func (t *TimestampTime) MarshalJSON() ([]byte, error) { + bin := make([]byte, 16) + bin = strconv.AppendInt(bin[:0], t.Time.Unix(), 10) + return bin, nil +} + +///实现encoding.JSON.Unmarshaler接口 +func (t *TimestampTime) UnmarshalJSON(bin []byte) error { + v, err := strconv.ParseInt(string(bin), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(v, 0) + return nil +} +/// + +type Account struct { + // 使用`swaggertype`标签将别名类型更改为内置类型integer + ID sql.NullInt64 `json:"id" 
swaggertype:"integer"` + + // 使用`swaggertype`标签更改struct类型为内置类型integer + RegisterTime TimestampTime `json:"register_time" swaggertype:"primitive,integer"` + + // Array types can be overridden using "array," format + Coeffs []big.Float `json:"coeffs" swaggertype:"array,number"` +} +``` + +[#379](https://github.com/swaggo/swag/issues/379) + +```go +type CerticateKeyPair struct { + Crt []byte `json:"crt" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` + Key []byte `json:"key" swaggertype:"string" format:"base64" example:"U3dhZ2dlciByb2Nrcw=="` +} +``` + +生成的swagger文档如下: + +```go +"api.MyBinding": { + "type":"object", + "properties":{ + "crt":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + }, + "key":{ + "type":"string", + "format":"base64", + "example":"U3dhZ2dlciByb2Nrcw==" + } + } +} +``` + +### 使用`swaggerignore`标签排除字段 + +```go +type Account struct { + ID string `json:"id"` + Name string `json:"name"` + Ignored int `swaggerignore:"true"` +} +``` + +### 将扩展信息添加到结构字段 + +```go +type Account struct { + ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // 扩展字段必须以"x-"开头 +} +``` + +生成swagger文档,如下所示: + +```go +"Account": { + "type": "object", + "properties": { + "id": { + "type": "string", + "x-nullable": true, + "x-abc": "def", + "x-omitempty": false + } + } +} +``` + +### 对展示的模型重命名 + +```go +type Resp struct { + Code int +}//@name Response +``` + +### 如何使用安全性注释 + +通用API信息。 + +```go +// @securityDefinitions.basic BasicAuth + +// @securitydefinitions.oauth2.application OAuth2Application +// @tokenUrl https://example.com/oauth/token +// @scope.write Grants write access +// @scope.admin Grants read and write access to administrative information +``` + +每个API操作。 + +```go +// @Security ApiKeyAuth +``` + +使用AND条件。 + +```go +// @Security ApiKeyAuth +// @Security OAuth2Application[write, admin] +``` + +## 项目相关 + +This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) but we 
simplified the usage and added support a variety of [web frameworks](#supported-web-frameworks). Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers). It has licenses [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en). + +## 贡献者 + +This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. + + +## 支持者 + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/swag#backer)] + + + +## 赞助商 + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/swag#sponsor)] + + + + + + + + + + + + +## License + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fswaggo%2Fswag.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fswaggo%2Fswag?ref=badge_large) diff --git a/vendor/github.com/swaggo/swag/v2/const.go b/vendor/github.com/swaggo/swag/v2/const.go new file mode 100644 index 00000000..83755103 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/const.go @@ -0,0 +1,567 @@ +package swag + +import ( + "go/ast" + "go/token" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// ConstVariable a model to record a const variable +type ConstVariable struct { + Name *ast.Ident + Type ast.Expr + Value interface{} + Comment *ast.CommentGroup + File *ast.File + Pkg *PackageDefinitions +} + +var escapedChars = map[uint8]uint8{ + 'n': '\n', + 'r': '\r', + 't': '\t', + 'v': '\v', + '\\': '\\', + '"': '"', +} + +// EvaluateEscapedChar parse escaped character +func EvaluateEscapedChar(text string) rune { + if len(text) == 1 { + return rune(text[0]) + } + + if len(text) == 2 && text[0] == '\\' { + return rune(escapedChars[text[1]]) + } + + if len(text) == 6 && text[0:2] == "\\u" { + n, err := strconv.ParseInt(text[2:], 16, 32) + if err == nil { + return rune(n) + } + } + + return 0 +} + +// EvaluateEscapedString parse escaped 
characters in string +func EvaluateEscapedString(text string) string { + if !strings.ContainsRune(text, '\\') { + return text + } + result := make([]byte, 0, len(text)) + for i := 0; i < len(text); i++ { + if text[i] == '\\' { + i++ + if text[i] == 'u' { + i++ + char, err := strconv.ParseInt(text[i:i+4], 16, 32) + if err == nil { + result = utf8.AppendRune(result, rune(char)) + } + i += 3 + } else if c, ok := escapedChars[text[i]]; ok { + result = append(result, c) + } + } else { + result = append(result, text[i]) + } + } + return string(result) +} + +// EvaluateDataConversion evaluate the type a explicit type conversion +func EvaluateDataConversion(x interface{}, typeName string) interface{} { + switch value := x.(type) { + case int: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int8: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + 
case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint8: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int16: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint16: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int32: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case 
"uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + case "string": + return string(value) + } + case uint32: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case int64: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case uint64: + switch typeName { + case "int": + return int(value) + case "byte": + return byte(value) + case "int8": + return int8(value) + case "int16": + return int16(value) + case "int32": + return int32(value) + case "int64": + return int64(value) + case "uint": + return uint(value) + case "uint8": + return uint8(value) + case "uint16": + return uint16(value) + case "uint32": + return uint32(value) + case "uint64": + return uint64(value) + case "rune": + return rune(value) + } + case string: + switch typeName { + case "string": + return value + } + } + return nil +} + +// EvaluateUnary evaluate the type and value of a unary expression +func EvaluateUnary(x interface{}, operator token.Token, xtype ast.Expr) (interface{}, ast.Expr) { + switch operator { + case token.SUB: + switch value := x.(type) { + case int: + 
return -value, xtype + case int8: + return -value, xtype + case int16: + return -value, xtype + case int32: + return -value, xtype + case int64: + return -value, xtype + } + case token.XOR: + switch value := x.(type) { + case int: + return ^value, xtype + case int8: + return ^value, xtype + case int16: + return ^value, xtype + case int32: + return ^value, xtype + case int64: + return ^value, xtype + case uint: + return ^value, xtype + case uint8: + return ^value, xtype + case uint16: + return ^value, xtype + case uint32: + return ^value, xtype + case uint64: + return ^value, xtype + } + } + return nil, nil +} + +// EvaluateBinary evaluate the type and value of a binary expression +func EvaluateBinary(x, y interface{}, operator token.Token, xtype, ytype ast.Expr) (interface{}, ast.Expr) { + if operator == token.SHR || operator == token.SHL { + var rightOperand uint64 + yValue := reflect.ValueOf(y) + if yValue.CanUint() { + rightOperand = yValue.Uint() + } else if yValue.CanInt() { + rightOperand = uint64(yValue.Int()) + } + + switch operator { + case token.SHL: + switch xValue := x.(type) { + case int: + return xValue << rightOperand, xtype + case int8: + return xValue << rightOperand, xtype + case int16: + return xValue << rightOperand, xtype + case int32: + return xValue << rightOperand, xtype + case int64: + return xValue << rightOperand, xtype + case uint: + return xValue << rightOperand, xtype + case uint8: + return xValue << rightOperand, xtype + case uint16: + return xValue << rightOperand, xtype + case uint32: + return xValue << rightOperand, xtype + case uint64: + return xValue << rightOperand, xtype + } + case token.SHR: + switch xValue := x.(type) { + case int: + return xValue >> rightOperand, xtype + case int8: + return xValue >> rightOperand, xtype + case int16: + return xValue >> rightOperand, xtype + case int32: + return xValue >> rightOperand, xtype + case int64: + return xValue >> rightOperand, xtype + case uint: + return xValue >> rightOperand, 
xtype + case uint8: + return xValue >> rightOperand, xtype + case uint16: + return xValue >> rightOperand, xtype + case uint32: + return xValue >> rightOperand, xtype + case uint64: + return xValue >> rightOperand, xtype + } + } + return nil, nil + } + + evalType := xtype + if evalType == nil { + evalType = ytype + } + + xValue := reflect.ValueOf(x) + yValue := reflect.ValueOf(y) + if xValue.Kind() == reflect.String && yValue.Kind() == reflect.String { + return xValue.String() + yValue.String(), evalType + } + + var targetValue reflect.Value + if xValue.Kind() != reflect.Int { + targetValue = reflect.New(xValue.Type()).Elem() + } else { + targetValue = reflect.New(yValue.Type()).Elem() + } + + switch operator { + case token.ADD: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() + yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() + yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) + yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() + uint64(yValue.Int())) + } + case token.SUB: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() - yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() - yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) - yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() - uint64(yValue.Int())) + } + case token.MUL: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() * yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() * yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) * yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + 
targetValue.SetUint(xValue.Uint() * uint64(yValue.Int())) + } + case token.QUO: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() / yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() / yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) / yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() / uint64(yValue.Int())) + } + case token.REM: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() % yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() % yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) % yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() % uint64(yValue.Int())) + } + case token.AND: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() & yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() & yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) & yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() & uint64(yValue.Int())) + } + case token.OR: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() | yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + targetValue.SetUint(xValue.Uint() | yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) | yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() | uint64(yValue.Int())) + } + case token.XOR: + if xValue.CanInt() && yValue.CanInt() { + targetValue.SetInt(xValue.Int() ^ yValue.Int()) + } else if xValue.CanUint() && yValue.CanUint() { + 
targetValue.SetUint(xValue.Uint() ^ yValue.Uint()) + } else if xValue.CanInt() && yValue.CanUint() { + targetValue.SetUint(uint64(xValue.Int()) ^ yValue.Uint()) + } else if xValue.CanUint() && yValue.CanInt() { + targetValue.SetUint(xValue.Uint() ^ uint64(yValue.Int())) + } + } + return targetValue.Interface(), evalType +} diff --git a/vendor/github.com/swaggo/swag/v2/doc.go b/vendor/github.com/swaggo/swag/v2/doc.go new file mode 100644 index 00000000..b57f0f38 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/doc.go @@ -0,0 +1,5 @@ +/* +Package swag converts Go annotations to Swagger Documentation 2.0. +See https://github.com/swaggo/swag for more information about swag. +*/ +package swag // import "github.com/swaggo/swag/v2" diff --git a/vendor/github.com/swaggo/swag/v2/enums.go b/vendor/github.com/swaggo/swag/v2/enums.go new file mode 100644 index 00000000..38658f20 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/enums.go @@ -0,0 +1,13 @@ +package swag + +const ( + enumVarNamesExtension = "x-enum-varnames" + enumCommentsExtension = "x-enum-comments" +) + +// EnumValue a model to record an enum consts variable +type EnumValue struct { + key string + Value interface{} + Comment string +} diff --git a/vendor/github.com/swaggo/swag/v2/extensions.go b/vendor/github.com/swaggo/swag/v2/extensions.go new file mode 100644 index 00000000..358104ca --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/extensions.go @@ -0,0 +1,4 @@ +package swag + +// CodeSamples is used to parse code samples. 
+type CodeSamples []map[string]string diff --git a/vendor/github.com/swaggo/swag/v2/field_parser.go b/vendor/github.com/swaggo/swag/v2/field_parser.go new file mode 100644 index 00000000..5b8761fb --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/field_parser.go @@ -0,0 +1,703 @@ +package swag + +import ( + "fmt" + "go/ast" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/go-openapi/spec" +) + +var _ FieldParser = &tagBaseFieldParser{p: nil, field: nil, tag: ""} + +const ( + requiredLabel = "required" + optionalLabel = "optional" + swaggerTypeTag = "swaggertype" + swaggerIgnoreTag = "swaggerignore" +) + +var _ FieldParser = &tagBaseFieldParser{} + +var _ FieldParser = &tagBaseFieldParser{} + +type tagBaseFieldParser struct { + p *Parser + field *ast.Field + tag reflect.StructTag +} + +func newTagBaseFieldParser(p *Parser, field *ast.Field) FieldParser { + fieldParser := tagBaseFieldParser{ + p: p, + field: field, + tag: "", + } + if fieldParser.field.Tag != nil { + fieldParser.tag = reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")) + } + + return &fieldParser +} + +func (ps *tagBaseFieldParser) ShouldSkip() bool { + // Skip non-exported fields. + if ps.field.Names != nil && !ast.IsExported(ps.field.Names[0].Name) { + return true + } + + if ps.field.Tag == nil { + return false + } + + ignoreTag := ps.tag.Get(swaggerIgnoreTag) + if strings.EqualFold(ignoreTag, "true") { + return true + } + + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name == "-" { + return true + } + + return false +} + +func (ps *tagBaseFieldParser) FieldNames() ([]string, error) { + if len(ps.field.Names) <= 1 { + // if embedded but with a json/form name ?? 
+ if ps.field.Tag != nil { + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name != "" { + return []string{name}, nil + } + + // use "form" tag over json tag + name = ps.FormName() + if name != "" { + return []string{name}, nil + } + } + if len(ps.field.Names) == 0 { + return nil, nil + } + } + var names = make([]string, 0, len(ps.field.Names)) + for _, name := range ps.field.Names { + switch ps.p.PropNamingStrategy { + case SnakeCase: + names = append(names, toSnakeCase(name.Name)) + case PascalCase: + names = append(names, name.Name) + default: + names = append(names, toLowerCamelCase(name.Name)) + } + } + return names, nil +} + +func (ps *tagBaseFieldParser) firstTagValue(tag string) string { + if ps.field.Tag != nil { + return strings.TrimRight(strings.TrimSpace(strings.Split(ps.tag.Get(tag), ",")[0]), "[]") + } + return "" +} + +func (ps *tagBaseFieldParser) FormName() string { + return ps.firstTagValue(formTag) +} + +func (ps *tagBaseFieldParser) HeaderName() string { + return ps.firstTagValue(headerTag) +} + +func (ps *tagBaseFieldParser) PathName() string { + return ps.firstTagValue(uriTag) +} + +func toSnakeCase(in string) string { + var ( + runes = []rune(in) + length = len(runes) + out []rune + ) + + for idx := 0; idx < length; idx++ { + if idx > 0 && unicode.IsUpper(runes[idx]) && + ((idx+1 < length && unicode.IsLower(runes[idx+1])) || unicode.IsLower(runes[idx-1])) { + out = append(out, '_') + } + + out = append(out, unicode.ToLower(runes[idx])) + } + + return string(out) +} + +func toLowerCamelCase(in string) string { + var flag bool + + out := make([]rune, len(in)) + + runes := []rune(in) + for i, curr := range runes { + if (i == 0 && unicode.IsUpper(curr)) || (flag && unicode.IsUpper(curr)) { + out[i] = unicode.ToLower(curr) + flag = true + + continue + } + + out[i] = curr + flag = false + } + + return string(out) +} + +func (ps *tagBaseFieldParser) CustomSchema() (*spec.Schema, error) { + if 
ps.field.Tag == nil { + return nil, nil + } + + typeTag := ps.tag.Get(swaggerTypeTag) + if typeTag != "" { + return BuildCustomSchema(strings.Split(typeTag, ",")) + } + + return nil, nil +} + +type structField struct { + title string + schemaType string + arrayType string + formatType string + maximum *float64 + minimum *float64 + multipleOf *float64 + maxLength *int64 + minLength *int64 + maxItems *int64 + minItems *int64 + exampleValue interface{} + enums []interface{} + enumVarNames []interface{} + unique bool + pattern string +} + +// splitNotWrapped slices s into all substrings separated by sep if sep is not +// wrapped by brackets and returns a slice of the substrings between those separators. +func splitNotWrapped(s string, sep rune) []string { + openCloseMap := map[rune]rune{ + '(': ')', + '[': ']', + '{': '}', + } + + var ( + result = make([]string, 0) + current = strings.Builder{} + openCount = 0 + openChar rune + ) + + for _, char := range s { + switch { + case openChar == 0 && openCloseMap[char] != 0: + openChar = char + + openCount++ + + current.WriteRune(char) + case char == openChar: + openCount++ + + current.WriteRune(char) + case openCount > 0 && char == openCloseMap[openChar]: + openCount-- + + current.WriteRune(char) + case openCount == 0 && char == sep: + result = append(result, current.String()) + + openChar = 0 + + current = strings.Builder{} + default: + current.WriteRune(char) + } + } + + if current.String() != "" { + result = append(result, current.String()) + } + + return result +} + +// ComplementSchema complement schema with field properties +func (ps *tagBaseFieldParser) ComplementSchema(schema *spec.Schema) error { + types := ps.p.GetSchemaTypePath(schema, 2) + if len(types) == 0 { + return fmt.Errorf("invalid type for field: %s", ps.field.Names[0]) + } + + if IsRefSchema(schema) { + var newSchema = spec.Schema{} + err := ps.complementSchema(&newSchema, types) + if err != nil { + return err + } + if !reflect.ValueOf(newSchema).IsZero() 
{ + *schema = *(newSchema.WithAllOf(*schema)) + } + return nil + } + + return ps.complementSchema(schema, types) +} + +// complementSchema complement schema with field properties +func (ps *tagBaseFieldParser) complementSchema(schema *spec.Schema, types []string) error { + if ps.field.Tag == nil { + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = strings.TrimSpace(ps.field.Comment.Text()) + } + + return nil + } + + field := &structField{ + schemaType: types[0], + formatType: ps.tag.Get(formatTag), + title: ps.tag.Get(titleTag), + } + + if len(types) > 1 && (types[0] == ARRAY || types[0] == OBJECT) { + field.arrayType = types[1] + } + + jsonTagValue := ps.tag.Get(jsonTag) + + bindingTagValue := ps.tag.Get(bindingTag) + if bindingTagValue != "" { + parseValidTags(bindingTagValue, field) + } + + validateTagValue := ps.tag.Get(validateTag) + if validateTagValue != "" { + parseValidTags(validateTagValue, field) + } + + enumsTagValue := ps.tag.Get(enumsTag) + if enumsTagValue != "" { + err := parseEnumTags(enumsTagValue, field) + if err != nil { + return err + } + } + + if IsNumericType(field.schemaType) || IsNumericType(field.arrayType) { + maximum, err := getFloatTag(ps.tag, maximumTag) + if err != nil { + return err + } + + if maximum != nil { + field.maximum = maximum + } + + minimum, err := getFloatTag(ps.tag, minimumTag) + if err != nil { + return err + } + + if minimum != nil { + field.minimum = minimum + } + + multipleOf, err := getFloatTag(ps.tag, multipleOfTag) + if err != nil { + return err + } + + if multipleOf != nil { + field.multipleOf = multipleOf + } + } + + if field.schemaType == STRING || field.arrayType == STRING { + maxLength, err := getIntTag(ps.tag, maxLengthTag) + if err != nil { + return err + } + + if maxLength != nil { + field.maxLength = maxLength + } + + minLength, err := getIntTag(ps.tag, minLengthTag) + if err != 
nil { + return err + } + + if minLength != nil { + field.minLength = minLength + } + + pattern, ok := ps.tag.Lookup(patternTag) + if ok { + field.pattern = pattern + } + } + + // json:"name,string" or json:",string" + exampleTagValue, ok := ps.tag.Lookup(exampleTag) + if ok { + field.exampleValue = exampleTagValue + + if !strings.Contains(jsonTagValue, ",string") { + example, err := defineTypeOfExample(field.schemaType, field.arrayType, exampleTagValue) + if err != nil { + return err + } + + field.exampleValue = example + } + } + + // perform this after setting everything else (min, max, etc...) + if strings.Contains(jsonTagValue, ",string") { + // @encoding/json: "It applies only to fields of string, floating point, integer, or boolean types." + defaultValues := map[string]string{ + // Zero Values as string + STRING: "", + INTEGER: "0", + BOOLEAN: "false", + NUMBER: "0", + } + + defaultValue, ok := defaultValues[field.schemaType] + if ok { + field.schemaType = STRING + *schema = *PrimitiveSchema(field.schemaType) + + if field.exampleValue == nil { + // if exampleValue is not defined by the user, + // we will force an example with a correct value + // (eg: int->"0", bool:"false") + field.exampleValue = defaultValue + } + } + } + + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = strings.TrimSpace(ps.field.Comment.Text()) + } + + schema.ReadOnly = ps.tag.Get(readOnlyTag) == "true" + + defaultTagValue := ps.tag.Get(defaultTag) + if defaultTagValue != "" { + value, err := defineType(field.schemaType, defaultTagValue) + if err != nil { + return err + } + + schema.Default = value + } + + schema.Example = field.exampleValue + + if field.schemaType != ARRAY { + schema.Format = field.formatType + } + schema.Title = field.title + + extensionsTagValue := ps.tag.Get(extensionsTag) + if extensionsTagValue != "" { + schema.Extensions = 
setExtensionParam(extensionsTagValue) + } + + varNamesTag := ps.tag.Get("x-enum-varnames") + if varNamesTag != "" { + varNames := strings.Split(varNamesTag, ",") + if len(varNames) != len(field.enums) { + return fmt.Errorf("invalid count of x-enum-varnames. expected %d, got %d", len(field.enums), len(varNames)) + } + + field.enumVarNames = nil + + for _, v := range varNames { + field.enumVarNames = append(field.enumVarNames, v) + } + + if field.schemaType == ARRAY { + // Add the var names in the items schema + if schema.Items.Schema.Extensions == nil { + schema.Items.Schema.Extensions = map[string]interface{}{} + } + schema.Items.Schema.Extensions[enumVarNamesExtension] = field.enumVarNames + } else { + // Add to top level schema + if schema.Extensions == nil { + schema.Extensions = map[string]interface{}{} + } + schema.Extensions[enumVarNamesExtension] = field.enumVarNames + } + } + + eleSchema := schema + + if field.schemaType == ARRAY { + // For Array only + schema.MaxItems = field.maxItems + schema.MinItems = field.minItems + schema.UniqueItems = field.unique + schema.Pattern = field.pattern + + eleSchema = schema.Items.Schema + eleSchema.Format = field.formatType + } + + eleSchema.Maximum = field.maximum + eleSchema.Minimum = field.minimum + eleSchema.MultipleOf = field.multipleOf + eleSchema.MaxLength = field.maxLength + eleSchema.MinLength = field.minLength + eleSchema.Enum = field.enums + eleSchema.Pattern = field.pattern + + return nil +} + +func getFloatTag(structTag reflect.StructTag, tagName string) (*float64, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := strconv.ParseFloat(strValue, 64) + if err != nil { + return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func getIntTag(structTag reflect.StructTag, tagName string) (*int64, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := 
strconv.ParseInt(strValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func (ps *tagBaseFieldParser) IsRequired() (bool, error) { + if ps.field.Tag == nil { + return false, nil + } + + bindingTag := ps.tag.Get(bindingTag) + if bindingTag != "" { + for _, val := range strings.Split(bindingTag, ",") { + switch val { + case requiredLabel: + return true, nil + case optionalLabel: + return false, nil + } + } + } + + validateTag := ps.tag.Get(validateTag) + if validateTag != "" { + for _, val := range strings.Split(validateTag, ",") { + switch val { + case requiredLabel: + return true, nil + case optionalLabel: + return false, nil + } + } + } + + return ps.p.RequiredByDefault, nil +} + +func parseValidTags(validTag string, sf *structField) { + + // `validate:"required,max=10,min=1"` + // ps. required checked by IsRequired(). + for _, val := range strings.Split(validTag, ",") { + var ( + valValue string + keyVal = strings.Split(val, "=") + ) + + switch len(keyVal) { + case 1: + case 2: + valValue = strings.ReplaceAll(strings.ReplaceAll(keyVal[1], utf8HexComma, ","), utf8Pipe, "|") + default: + continue + } + + switch keyVal[0] { + case "max", "lte": + sf.setMax(valValue) + case "min", "gte": + sf.setMin(valValue) + case "oneof": + if strings.Contains(validTag, "swaggerIgnore") { + continue + } + + sf.setOneOf(valValue) + case "unique": + if sf.schemaType == ARRAY { + sf.unique = true + } + case "dive": + // ignore dive + return + default: + continue + } + } +} + +func parseEnumTags(enumTag string, field *structField) error { + enumType := field.schemaType + if field.schemaType == ARRAY { + enumType = field.arrayType + } + + field.enums = nil + + for _, e := range strings.Split(enumTag, ",") { + value, err := defineType(enumType, e) + if err != nil { + return err + } + + field.enums = append(field.enums, value) + } + + return nil +} + +func (sf *structField) setOneOf(valValue 
string) { + if len(sf.enums) != 0 { + return + } + + enumType := sf.schemaType + if sf.schemaType == ARRAY { + enumType = sf.arrayType + } + + valValues := parseOneOfParam2(valValue) + for i := range valValues { + value, err := defineType(enumType, valValues[i]) + if err != nil { + continue + } + + sf.enums = append(sf.enums, value) + } +} + +func (sf *structField) setMin(valValue string) { + value, err := strconv.ParseFloat(valValue, 64) + if err != nil { + return + } + + switch sf.schemaType { + case INTEGER, NUMBER: + sf.minimum = &value + case STRING: + intValue := int64(value) + sf.minLength = &intValue + case ARRAY: + intValue := int64(value) + sf.minItems = &intValue + } +} + +func (sf *structField) setMax(valValue string) { + value, err := strconv.ParseFloat(valValue, 64) + if err != nil { + return + } + + switch sf.schemaType { + case INTEGER, NUMBER: + sf.maximum = &value + case STRING: + intValue := int64(value) + sf.maxLength = &intValue + case ARRAY: + intValue := int64(value) + sf.maxItems = &intValue + } +} + +const ( + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" +) + +// These code copy from +// https://github.com/go-playground/validator/blob/d4271985b44b735c6f76abc7a06532ee997f9476/baked_in.go#L207 +// ---. +var oneofValsCache = map[string][]string{} +var oneofValsCacheRWLock = sync.RWMutex{} +var splitParamsRegex = regexp.MustCompile(`'[^']*'|\S+`) + +func parseOneOfParam2(param string) []string { + oneofValsCacheRWLock.RLock() + values, ok := oneofValsCache[param] + oneofValsCacheRWLock.RUnlock() + + if !ok { + oneofValsCacheRWLock.Lock() + values = splitParamsRegex.FindAllString(param, -1) + + for i := 0; i < len(values); i++ { + values[i] = strings.ReplaceAll(values[i], "'", "") + } + + oneofValsCache[param] = values + + oneofValsCacheRWLock.Unlock() + } + + return values +} + +// ---. 
diff --git a/vendor/github.com/swaggo/swag/v2/field_parserv3.go b/vendor/github.com/swaggo/swag/v2/field_parserv3.go new file mode 100644 index 00000000..37a9c646 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/field_parserv3.go @@ -0,0 +1,596 @@ +package swag + +import ( + "fmt" + "go/ast" + "reflect" + "strconv" + "strings" + + "github.com/sv-tools/openapi/spec" +) + +type structFieldV3 struct { + schemaType string + arrayType string + formatType string + maximum *int + minimum *int + multipleOf *int + maxLength *int + minLength *int + maxItems *int + minItems *int + exampleValue interface{} + enums []interface{} + enumVarNames []interface{} + unique bool + pattern string +} + +func (sf *structFieldV3) setOneOf(valValue string) { + if len(sf.enums) != 0 { + return + } + + enumType := sf.schemaType + if sf.schemaType == ARRAY { + enumType = sf.arrayType + } + + valValues := parseOneOfParam2(valValue) + for i := range valValues { + value, err := defineType(enumType, valValues[i]) + if err != nil { + continue + } + + sf.enums = append(sf.enums, value) + } +} + +func (sf *structFieldV3) setMin(valValue string) { + value, err := strconv.Atoi(valValue) + if err != nil { + return + } + + switch sf.schemaType { + case INTEGER, NUMBER: + sf.minimum = &value + case STRING: + sf.minLength = &value + case ARRAY: + sf.minItems = &value + } +} + +func (sf *structFieldV3) setMax(valValue string) { + value, err := strconv.Atoi(valValue) + if err != nil { + return + } + + switch sf.schemaType { + case INTEGER, NUMBER: + sf.maximum = &value + case STRING: + sf.maxLength = &value + case ARRAY: + sf.maxItems = &value + } +} + +type tagBaseFieldParserV3 struct { + p *Parser + file *ast.File + field *ast.Field + tag reflect.StructTag +} + +func newTagBaseFieldParserV3(p *Parser, file *ast.File, field *ast.Field) FieldParserV3 { + fieldParser := tagBaseFieldParserV3{ + p: p, + file: file, + field: field, + tag: "", + } + if fieldParser.field.Tag != nil { + fieldParser.tag = 
reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")) + } + + return &fieldParser +} + +func (ps *tagBaseFieldParserV3) CustomSchema() (*spec.RefOrSpec[spec.Schema], error) { + if ps.field.Tag == nil { + return nil, nil + } + + typeTag := ps.tag.Get(swaggerTypeTag) + if typeTag != "" { + return BuildCustomSchemaV3(strings.Split(typeTag, ",")) + } + + return nil, nil +} + +// ComplementSchema complement schema with field properties +func (ps *tagBaseFieldParserV3) ComplementSchema(schema *spec.RefOrSpec[spec.Schema]) error { + if schema.Spec == nil { + schema = ps.p.openAPI.Components.Spec.Schemas[strings.ReplaceAll(schema.Ref.Ref, "#/components/schemas/", "")] + if schema == nil { + return fmt.Errorf("could not resolve schema for ref %s", schema.Ref.Ref) + } + } + + types := ps.p.GetSchemaTypePathV3(schema, 2) + if len(types) == 0 { + return fmt.Errorf("invalid type for field: %s", ps.field.Names[0]) + } + + if schema.Ref != nil { //IsRefSchema(schema) + // TODO fetch existing schema from components + var newSchema = spec.Schema{} + err := ps.complementSchema(&newSchema, types) + if err != nil { + return err + } + if !reflect.ValueOf(newSchema).IsZero() { + newSchema.AllOf = []*spec.RefOrSpec[spec.Schema]{{Spec: schema.Spec}} + *schema = spec.RefOrSpec[spec.Schema]{Spec: &newSchema} + } + return nil + } + + return ps.complementSchema(schema.Spec, types) +} + +// complementSchema complement schema with field properties +func (ps *tagBaseFieldParserV3) complementSchema(schema *spec.Schema, types []string) error { + if ps.field.Tag == nil { + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = strings.TrimSpace(ps.field.Comment.Text()) + } + + return nil + } + + field := &structFieldV3{ + schemaType: types[0], + formatType: ps.tag.Get(formatTag), + } + + if len(types) > 1 && (types[0] == ARRAY || types[0] == OBJECT) { + field.arrayType = 
types[1] + } + + jsonTagValue := ps.tag.Get(jsonTag) + + bindingTagValue := ps.tag.Get(bindingTag) + if bindingTagValue != "" { + field.parseValidTags(bindingTagValue) + } + + validateTagValue := ps.tag.Get(validateTag) + if validateTagValue != "" { + field.parseValidTags(validateTagValue) + } + + enumsTagValue := ps.tag.Get(enumsTag) + if enumsTagValue != "" { + err := field.parseEnumTags(enumsTagValue) + if err != nil { + return err + } + } + + if IsNumericType(field.schemaType) || IsNumericType(field.arrayType) { + maximum, err := getIntTagV3(ps.tag, maximumTag) + if err != nil { + return err + } + + if maximum != nil { + field.maximum = maximum + } + + minimum, err := getIntTagV3(ps.tag, minimumTag) + if err != nil { + return err + } + + if minimum != nil { + field.minimum = minimum + } + + multipleOf, err := getIntTagV3(ps.tag, multipleOfTag) + if err != nil { + return err + } + + if multipleOf != nil { + field.multipleOf = multipleOf + } + } + + if field.schemaType == STRING || field.arrayType == STRING { + maxLength, err := getIntTagV3(ps.tag, maxLengthTag) + if err != nil { + return err + } + + if maxLength != nil { + field.maxLength = maxLength + } + + minLength, err := getIntTagV3(ps.tag, minLengthTag) + if err != nil { + return err + } + + if minLength != nil { + field.minLength = minLength + } + + pattern, ok := ps.tag.Lookup(patternTag) + if ok { + field.pattern = pattern + } + } + + // json:"name,string" or json:",string" + exampleTagValue, ok := ps.tag.Lookup(exampleTag) + if ok { + field.exampleValue = exampleTagValue + + if !strings.Contains(jsonTagValue, ",string") { + example, err := defineTypeOfExample(field.schemaType, field.arrayType, exampleTagValue) + if err != nil { + return err + } + + field.exampleValue = example + } + } + + // perform this after setting everything else (min, max, etc...) + if strings.Contains(jsonTagValue, ",string") { + // @encoding/json: "It applies only to fields of string, floating point, integer, or boolean types." 
+ defaultValues := map[string]string{ + // Zero Values as string + STRING: "", + INTEGER: "0", + BOOLEAN: "false", + NUMBER: "0", + } + + defaultValue, ok := defaultValues[field.schemaType] + if ok { + field.schemaType = STRING + *schema = *PrimitiveSchemaV3(field.schemaType).Spec + + if field.exampleValue == nil { + // if exampleValue is not defined by the user, + // we will force an example with a correct value + // (eg: int->"0", bool:"false") + field.exampleValue = defaultValue + } + } + } + + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = strings.TrimSpace(ps.field.Comment.Text()) + } + + schema.ReadOnly = ps.tag.Get(readOnlyTag) == "true" + + defaultTagValue := ps.tag.Get(defaultTag) + if defaultTagValue != "" { + value, err := defineType(field.schemaType, defaultTagValue) + if err != nil { + return err + } + + schema.Default = value + } + + schema.Example = field.exampleValue + + if field.schemaType != ARRAY { + schema.Format = field.formatType + } + + extensionsTagValue := ps.tag.Get(extensionsTag) + if extensionsTagValue != "" { + schema.Extensions = setExtensionParam(extensionsTagValue) + } + + varNamesTag := ps.tag.Get("x-enum-varnames") + if varNamesTag != "" { + varNames := strings.Split(varNamesTag, ",") + if len(varNames) != len(field.enums) { + return fmt.Errorf("invalid count of x-enum-varnames. 
expected %d, got %d", len(field.enums), len(varNames)) + } + + field.enumVarNames = nil + + for _, v := range varNames { + field.enumVarNames = append(field.enumVarNames, v) + } + + if field.schemaType == ARRAY { + // Add the var names in the items schema + if schema.Items.Schema.Spec.Extensions == nil { + schema.Items.Schema.Spec.Extensions = map[string]interface{}{} + } + schema.Items.Schema.Spec.Extensions[enumVarNamesExtension] = field.enumVarNames + } else { + // Add to top level schema + if schema.Extensions == nil { + schema.Extensions = map[string]interface{}{} + } + schema.Extensions[enumVarNamesExtension] = field.enumVarNames + } + } + + var oneOfSchemas []*spec.RefOrSpec[spec.Schema] + oneOfTagValue := ps.tag.Get(oneOfTag) + if oneOfTagValue != "" { + oneOfTypes := strings.Split((oneOfTagValue), ",") + for _, oneOfType := range oneOfTypes { + oneOfSchema, err := ps.p.getTypeSchemaV3(oneOfType, ps.file, true) + if err != nil { + return fmt.Errorf("can't find oneOf type %q: %v", oneOfType, err) + } + oneOfSchemas = append(oneOfSchemas, oneOfSchema) + } + } + + elemSchema := schema + + if field.schemaType == ARRAY { + // For Array only + schema.MaxItems = field.maxItems + schema.MinItems = field.minItems + schema.UniqueItems = &field.unique + + elemSchema = schema.Items.Schema.Spec + if elemSchema == nil { + elemSchema = ps.p.getSchemaByRef(schema.Items.Schema.Ref) + } + + elemSchema.Format = field.formatType + } + + elemSchema.Maximum = field.maximum + elemSchema.Minimum = field.minimum + elemSchema.MultipleOf = field.multipleOf + elemSchema.MaxLength = field.maxLength + elemSchema.MinLength = field.minLength + elemSchema.Enum = field.enums + elemSchema.Pattern = field.pattern + elemSchema.OneOf = oneOfSchemas + + return nil +} + +func getIntTagV3(structTag reflect.StructTag, tagName string) (*int, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := strconv.Atoi(strValue) + if err != nil { + return 
nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func parseValidTagsV3(validTag string, sf *structFieldV3) { + + // `validate:"required,max=10,min=1"` + // ps. required checked by IsRequired(). + for _, val := range strings.Split(validTag, ",") { + var ( + valValue string + keyVal = strings.Split(val, "=") + ) + + switch len(keyVal) { + case 1: + case 2: + valValue = strings.ReplaceAll(strings.ReplaceAll(keyVal[1], utf8HexComma, ","), utf8Pipe, "|") + default: + continue + } + + switch keyVal[0] { + case "max", "lte": + sf.setMax(valValue) + case "min", "gte": + sf.setMin(valValue) + case "oneof": + if strings.Contains(validTag, "swaggerIgnore") { + continue + } + + sf.setOneOf(valValue) + case "unique": + if sf.schemaType == ARRAY { + sf.unique = true + } + case "dive": + // ignore dive + return + default: + continue + } + } +} + +func (sf *structFieldV3) parseValidTags(validTag string) { + + // `validate:"required,max=10,min=1"` + // ps. required checked by IsRequired(). 
+ for _, val := range strings.Split(validTag, ",") { + var ( + valValue string + keyVal = strings.Split(val, "=") + ) + + switch len(keyVal) { + case 1: + case 2: + valValue = strings.ReplaceAll(strings.ReplaceAll(keyVal[1], utf8HexComma, ","), utf8Pipe, "|") + default: + continue + } + + switch keyVal[0] { + case "max", "lte": + sf.setMax(valValue) + case "min", "gte": + sf.setMin(valValue) + case "oneof": + if strings.Contains(validTag, "swaggerIgnore") { + continue + } + + sf.setOneOf(valValue) + case "unique": + if sf.schemaType == ARRAY { + sf.unique = true + } + case "dive": + // ignore dive + return + default: + continue + } + } +} + +func (sf *structFieldV3) parseEnumTags(enumTag string) error { + enumType := sf.schemaType + if sf.schemaType == ARRAY { + enumType = sf.arrayType + } + + sf.enums = nil + + for _, e := range strings.Split(enumTag, ",") { + value, err := defineType(enumType, e) + if err != nil { + return err + } + + sf.enums = append(sf.enums, value) + } + + return nil +} + +func (ps *tagBaseFieldParserV3) ShouldSkip() bool { + // Skip non-exported fields. 
+ if ps.field.Names != nil && !ast.IsExported(ps.field.Names[0].Name) { + return true + } + + if ps.field.Tag == nil { + return false + } + + ignoreTag := ps.tag.Get(swaggerIgnoreTag) + if strings.EqualFold(ignoreTag, "true") { + return true + } + + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name == "-" { + return true + } + + return false +} + +func (ps *tagBaseFieldParserV3) FieldName() (string, error) { + var name string + + if ps.field.Tag != nil { + // json:"tag,hoge" + name = strings.TrimSpace(strings.Split(ps.tag.Get(jsonTag), ",")[0]) + if name != "" { + return name, nil + } + + // use "form" tag over json tag + name = ps.FormName() + if name != "" { + return name, nil + } + } + + if ps.field.Names == nil { + return "", nil + } + + switch ps.p.PropNamingStrategy { + case SnakeCase: + return toSnakeCase(ps.field.Names[0].Name), nil + case PascalCase: + return ps.field.Names[0].Name, nil + default: + return toLowerCamelCase(ps.field.Names[0].Name), nil + } +} + +func (ps *tagBaseFieldParserV3) FormName() string { + if ps.field.Tag != nil { + return strings.TrimSpace(strings.Split(ps.tag.Get(formTag), ",")[0]) + } + return "" +} + +func (ps *tagBaseFieldParserV3) IsRequired() (bool, error) { + if ps.field.Tag == nil { + return false, nil + } + + bindingTag := ps.tag.Get(bindingTag) + if bindingTag != "" { + for _, val := range strings.Split(bindingTag, ",") { + switch val { + case requiredLabel: + return true, nil + case optionalLabel: + return false, nil + } + } + } + + validateTag := ps.tag.Get(validateTag) + if validateTag != "" { + for _, val := range strings.Split(validateTag, ",") { + switch val { + case requiredLabel: + return true, nil + case optionalLabel: + return false, nil + } + } + } + + return ps.p.RequiredByDefault, nil +} diff --git a/vendor/github.com/swaggo/swag/v2/formatter.go b/vendor/github.com/swaggo/swag/v2/formatter.go new file mode 100644 index 00000000..511e3a82 --- /dev/null +++ 
b/vendor/github.com/swaggo/swag/v2/formatter.go @@ -0,0 +1,182 @@ +package swag + +import ( + "bytes" + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "log" + "os" + "regexp" + "sort" + "strings" + "text/tabwriter" +) + +// Check of @Param @Success @Failure @Response @Header +var specialTagForSplit = map[string]bool{ + paramAttr: true, + successAttr: true, + failureAttr: true, + responseAttr: true, + headerAttr: true, +} + +var skipChar = map[byte]byte{ + '"': '"', + '(': ')', + '{': '}', + '[': ']', +} + +// Formatter implements a formatter for Go source files. +type Formatter struct { + // debugging output goes here + debug Debugger +} + +// NewFormatter create a new formatter instance. +func NewFormatter() *Formatter { + formatter := &Formatter{ + debug: log.New(os.Stdout, "", log.LstdFlags), + } + return formatter +} + +// Format formats swag comments in contents. It uses fileName to report errors +// that happen during parsing of contents. +func (f *Formatter) Format(fileName string, contents []byte) ([]byte, error) { + fileSet := token.NewFileSet() + ast, err := goparser.ParseFile(fileSet, fileName, contents, goparser.ParseComments) + if err != nil { + return nil, err + } + + // Formatting changes are described as an edit list of byte range + // replacements. We make these content-level edits directly rather than + // changing the AST nodes and writing those out (via [go/printer] or + // [go/format]) so that we only change the formatting of Swag attribute + // comments. This won't touch the formatting of any other comments, or of + // functions, etc. 
+ maxEdits := 0 + for _, comment := range ast.Comments { + maxEdits += len(comment.List) + } + edits := make(edits, 0, maxEdits) + + for _, comment := range ast.Comments { + formatFuncDoc(fileSet, comment.List, &edits) + } + + return edits.apply(contents), nil +} + +type edit struct { + begin int + end int + replacement []byte +} + +type edits []edit + +func (edits edits) apply(contents []byte) []byte { + // Apply the edits with the highest offset first, so that earlier edits + // don't affect the offsets of later edits. + sort.Slice(edits, func(i, j int) bool { + return edits[i].begin > edits[j].begin + }) + + for _, edit := range edits { + prefix := contents[:edit.begin] + suffix := contents[edit.end:] + contents = append(prefix, append(edit.replacement, suffix...)...) + } + + return contents +} + +// formatFuncDoc reformats the comment lines in commentList, and appends any +// changes to the edit list. +func formatFuncDoc(fileSet *token.FileSet, commentList []*ast.Comment, edits *edits) { + // Building the edit list to format a comment block is a two-step process. + // First, we iterate over each comment line looking for Swag attributes. In + // each one we find, we replace alignment whitespace with a tab character, + // then write the result into a tab writer. + + linesToComments := make(map[int]int, len(commentList)) + + buffer := &bytes.Buffer{} + w := tabwriter.NewWriter(buffer, 1, 4, 1, '\t', 0) + + for commentIndex, comment := range commentList { + text := comment.Text + if attr, body, found := swagComment(text); found { + formatted := "//\t" + attr + if body != "" { + formatted += "\t" + splitComment2(attr, body) + } + _, _ = fmt.Fprintln(w, formatted) + linesToComments[len(linesToComments)] = commentIndex + } + } + + // Once we've loaded all of the comment lines to be aligned into the tab + // writer, flushing it causes the aligned text to be written out to the + // backing buffer. 
+ _ = w.Flush() + + // Now the second step: we iterate over the aligned comment lines that were + // written into the backing buffer, pair each one up to its original + // comment line, and use the combination to describe the edit that needs to + // be made to the original input. + formattedComments := bytes.Split(buffer.Bytes(), []byte("\n")) + for lineIndex, commentIndex := range linesToComments { + comment := commentList[commentIndex] + *edits = append(*edits, edit{ + begin: fileSet.Position(comment.Pos()).Offset, + end: fileSet.Position(comment.End()).Offset, + replacement: formattedComments[lineIndex], + }) + } +} + +func splitComment2(attr, body string) string { + if specialTagForSplit[strings.ToLower(attr)] { + for i := 0; i < len(body); i++ { + if skipEnd, ok := skipChar[body[i]]; ok { + skipStart, n := body[i], 1 + for i++; i < len(body); i++ { + if skipStart != skipEnd && body[i] == skipStart { + n++ + } else if body[i] == skipEnd { + n-- + if n == 0 { + break + } + } + } + } else if body[i] == ' ' || body[i] == '\t' { + j := i + for ; j < len(body) && (body[j] == ' ' || body[j] == '\t'); j++ { + } + body = replaceRange(body, i, j, "\t") + } + } + } + return body +} + +func replaceRange(s string, start, end int, new string) string { + return s[:start] + new + s[end:] +} + +var swagCommentLineExpression = regexp.MustCompile(`^\/\/\s+(@[\S.]+)\s*(.*)`) + +func swagComment(comment string) (string, string, bool) { + matches := swagCommentLineExpression.FindStringSubmatch(comment) + if matches == nil { + return "", "", false + } + return matches[1], matches[2], true +} diff --git a/vendor/github.com/swaggo/swag/v2/generics.go b/vendor/github.com/swaggo/swag/v2/generics.go new file mode 100644 index 00000000..82afba6a --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/generics.go @@ -0,0 +1,445 @@ +package swag + +import ( + "errors" + "fmt" + "go/ast" + "strings" + "unicode" + + "github.com/go-openapi/spec" +) + +type genericTypeSpec struct { + TypeSpec 
*TypeSpecDef + Name string +} + +type formalParamType struct { + Name string + Type string +} + +func (t *genericTypeSpec) TypeName() string { + if t.TypeSpec != nil { + return t.TypeSpec.TypeName() + } + return t.Name +} + +func normalizeGenericTypeName(name string) string { + return strings.Replace(name, ".", "_", -1) +} + +func (pkgDefs *PackagesDefinitions) getTypeFromGenericParam(genericParam string, file *ast.File) (typeSpecDef *TypeSpecDef) { + if strings.HasPrefix(genericParam, "[]") { + typeSpecDef = pkgDefs.getTypeFromGenericParam(genericParam[2:], file) + if typeSpecDef == nil { + return nil + } + var expr ast.Expr + switch typeSpecDef.TypeSpec.Type.(type) { + case *ast.ArrayType, *ast.MapType: + expr = typeSpecDef.TypeSpec.Type + default: + name := typeSpecDef.TypeName() + expr = ast.NewIdent(name) + if _, ok := pkgDefs.uniqueDefinitions[name]; !ok { + pkgDefs.uniqueDefinitions[name] = typeSpecDef + } + } + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(string(IgnoreNameOverridePrefix) + "array_" + typeSpecDef.TypeName()), + Type: &ast.ArrayType{ + Elt: expr, + }, + }, + Enums: typeSpecDef.Enums, + PkgPath: typeSpecDef.PkgPath, + ParentSpec: typeSpecDef.ParentSpec, + SchemaName: "array_" + typeSpecDef.SchemaName, + NotUnique: false, + } + } + + if strings.HasPrefix(genericParam, "map[") { + parts := strings.SplitN(genericParam[4:], "]", 2) + if len(parts) != 2 { + return nil + } + typeSpecDef = pkgDefs.getTypeFromGenericParam(parts[1], file) + if typeSpecDef == nil { + return nil + } + var expr ast.Expr + switch typeSpecDef.TypeSpec.Type.(type) { + case *ast.ArrayType, *ast.MapType: + expr = typeSpecDef.TypeSpec.Type + default: + name := typeSpecDef.TypeName() + expr = ast.NewIdent(name) + if _, ok := pkgDefs.uniqueDefinitions[name]; !ok { + pkgDefs.uniqueDefinitions[name] = typeSpecDef + } + } + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(string(IgnoreNameOverridePrefix) + "map_" + parts[0] + "_" + 
typeSpecDef.TypeName()), + Type: &ast.MapType{ + Key: ast.NewIdent(parts[0]), //assume key is string or integer + Value: expr, + }, + }, + Enums: typeSpecDef.Enums, + PkgPath: typeSpecDef.PkgPath, + ParentSpec: typeSpecDef.ParentSpec, + SchemaName: "map_" + parts[0] + "_" + typeSpecDef.SchemaName, + NotUnique: false, + } + } + if IsGolangPrimitiveType(genericParam) { + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(genericParam), + Type: ast.NewIdent(genericParam), + }, + SchemaName: genericParam, + } + } + return pkgDefs.FindTypeSpec(genericParam, file) +} + +func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, original *TypeSpecDef, fullGenericForm string) *TypeSpecDef { + if original == nil || original.TypeSpec.TypeParams == nil || len(original.TypeSpec.TypeParams.List) == 0 { + return original + } + + name, genericParams := splitGenericsTypeName(fullGenericForm) + if genericParams == nil { + return nil + } + + //generic[x,y any,z any] considered, TODO what if the type is not `any`, but a concrete one, such as `int32|int64` or an certain interface{} + var formals []formalParamType + for _, field := range original.TypeSpec.TypeParams.List { + for _, ident := range field.Names { + formal := formalParamType{Name: ident.Name} + if ident, ok := field.Type.(*ast.Ident); ok { + formal.Type = ident.Name + } + formals = append(formals, formal) + } + } + if len(genericParams) != len(formals) { + return nil + } + genericParamTypeDefs := map[string]*genericTypeSpec{} + + for i, genericParam := range genericParams { + var typeDef *TypeSpecDef + if !IsGolangPrimitiveType(genericParam) { + typeDef = pkgDefs.getTypeFromGenericParam(genericParam, file) + if typeDef != nil { + genericParam = typeDef.TypeName() + if _, ok := pkgDefs.uniqueDefinitions[genericParam]; !ok { + pkgDefs.uniqueDefinitions[genericParam] = typeDef + } + } + } + genericParamTypeDefs[formals[i].Name] = &genericTypeSpec{ + TypeSpec: typeDef, + Name: genericParam, + 
} + } + + name = fmt.Sprintf("%s%s-", string(IgnoreNameOverridePrefix), original.TypeName()) + schemaName := fmt.Sprintf("%s-", original.SchemaName) + + var nameParts []string + var schemaNameParts []string + + for _, def := range formals { + if specDef, ok := genericParamTypeDefs[def.Name]; ok { + nameParts = append(nameParts, specDef.Name) + + schemaNamePart := specDef.Name + + if specDef.TypeSpec != nil { + schemaNamePart = specDef.TypeSpec.SchemaName + } + + schemaNameParts = append(schemaNameParts, schemaNamePart) + } + } + + name += normalizeGenericTypeName(strings.Join(nameParts, "-")) + schemaName += normalizeGenericTypeName(strings.Join(schemaNameParts, "-")) + + if typeSpec, ok := pkgDefs.uniqueDefinitions[name]; ok { + return typeSpec + } + + parametrizedTypeSpec := &TypeSpecDef{ + File: original.File, + PkgPath: original.PkgPath, + TypeSpec: &ast.TypeSpec{ + Name: &ast.Ident{ + Name: name, + NamePos: original.TypeSpec.Name.NamePos, + Obj: original.TypeSpec.Name.Obj, + }, + Doc: original.TypeSpec.Doc, + Assign: original.TypeSpec.Assign, + }, + SchemaName: schemaName, + } + pkgDefs.uniqueDefinitions[name] = parametrizedTypeSpec + + parametrizedTypeSpec.TypeSpec.Type = pkgDefs.resolveGenericType(original.File, original.TypeSpec.Type, genericParamTypeDefs) + + return parametrizedTypeSpec +} + +// splitGenericsTypeName splits a generic struct name in his parts +func splitGenericsTypeName(fullGenericForm string) (string, []string) { + //remove all spaces character + fullGenericForm = strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 + } + return r + }, fullGenericForm) + + // split only at the first '[' and remove the last ']' + if fullGenericForm[len(fullGenericForm)-1] != ']' { + return "", nil + } + + genericParams := strings.SplitN(fullGenericForm[:len(fullGenericForm)-1], "[", 2) + if len(genericParams) == 1 { + return "", nil + } + + // generic type name + genericTypeName := genericParams[0] + + depth := 0 + genericParams = 
strings.FieldsFunc(genericParams[1], func(r rune) bool { + if r == '[' { + depth++ + } else if r == ']' { + depth-- + } else if r == ',' && depth == 0 { + return true + } + return false + }) + if depth != 0 { + return "", nil + } + + return genericTypeName, genericParams +} + +func (pkgDefs *PackagesDefinitions) getParametrizedType(genTypeSpec *genericTypeSpec) ast.Expr { + if genTypeSpec.TypeSpec != nil && strings.Contains(genTypeSpec.Name, ".") { + parts := strings.SplitN(genTypeSpec.Name, ".", 2) + return &ast.SelectorExpr{ + X: &ast.Ident{Name: parts[0]}, + Sel: &ast.Ident{Name: parts[1]}, + } + } + + //a primitive type name or a type name in current package + return &ast.Ident{Name: genTypeSpec.Name} +} + +func (pkgDefs *PackagesDefinitions) resolveGenericType(file *ast.File, expr ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) ast.Expr { + switch astExpr := expr.(type) { + case *ast.Ident: + if genTypeSpec, ok := genericParamTypeDefs[astExpr.Name]; ok { + return pkgDefs.getParametrizedType(genTypeSpec) + } + case *ast.ArrayType: + return &ast.ArrayType{ + Elt: pkgDefs.resolveGenericType(file, astExpr.Elt, genericParamTypeDefs), + Len: astExpr.Len, + Lbrack: astExpr.Lbrack, + } + case *ast.MapType: + return &ast.MapType{ + Map: astExpr.Map, + Key: pkgDefs.resolveGenericType(file, astExpr.Key, genericParamTypeDefs), + Value: pkgDefs.resolveGenericType(file, astExpr.Value, genericParamTypeDefs), + } + case *ast.StarExpr: + return &ast.StarExpr{ + Star: astExpr.Star, + X: pkgDefs.resolveGenericType(file, astExpr.X, genericParamTypeDefs), + } + case *ast.IndexExpr, *ast.IndexListExpr: + fullGenericName, _ := getGenericFieldType(file, expr, genericParamTypeDefs) + typeDef := pkgDefs.FindTypeSpec(fullGenericName, file) + if typeDef != nil { + return typeDef.TypeSpec.Name + } + case *ast.StructType: + newStructTypeDef := &ast.StructType{ + Struct: astExpr.Struct, + Incomplete: astExpr.Incomplete, + Fields: &ast.FieldList{ + Opening: 
astExpr.Fields.Opening, + Closing: astExpr.Fields.Closing, + }, + } + + for _, field := range astExpr.Fields.List { + newField := &ast.Field{ + Type: field.Type, + Doc: field.Doc, + Names: field.Names, + Tag: field.Tag, + Comment: field.Comment, + } + + newField.Type = pkgDefs.resolveGenericType(file, field.Type, genericParamTypeDefs) + + newStructTypeDef.Fields.List = append(newStructTypeDef.Fields.List, newField) + } + return newStructTypeDef + } + return expr +} + +func getExtendedGenericFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { + switch fieldType := field.(type) { + case *ast.ArrayType: + fieldName, err := getExtendedGenericFieldType(file, fieldType.Elt, genericParamTypeDefs) + return "[]" + fieldName, err + case *ast.StarExpr: + return getExtendedGenericFieldType(file, fieldType.X, genericParamTypeDefs) + case *ast.Ident: + if genericParamTypeDefs != nil { + if typeSpec, ok := genericParamTypeDefs[fieldType.Name]; ok { + return typeSpec.Name, nil + } + } + if fieldType.Obj == nil { + return fieldType.Name, nil + } + + tSpec := &TypeSpecDef{ + File: file, + TypeSpec: fieldType.Obj.Decl.(*ast.TypeSpec), + PkgPath: file.Name.Name, + } + return tSpec.TypeName(), nil + default: + return getFieldType(file, field, genericParamTypeDefs) + } +} + +func getGenericFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { + var fullName string + var baseName string + var err error + switch fieldType := field.(type) { + case *ast.IndexListExpr: + baseName, err = getGenericTypeName(file, fieldType.X) + if err != nil { + return "", err + } + fullName = baseName + "[" + + for _, index := range fieldType.Indices { + fieldName, err := getExtendedGenericFieldType(file, index, genericParamTypeDefs) + if err != nil { + return "", err + } + + fullName += fieldName + "," + } + + fullName = strings.TrimRight(fullName, ",") + "]" + case *ast.IndexExpr: + baseName, 
err = getGenericTypeName(file, fieldType.X) + if err != nil { + return "", err + } + + indexName, err := getExtendedGenericFieldType(file, fieldType.Index, genericParamTypeDefs) + if err != nil { + return "", err + } + + fullName = fmt.Sprintf("%s[%s]", baseName, indexName) + } + + if fullName == "" { + return "", fmt.Errorf("unknown field type %#v", field) + } + + var packageName string + if !strings.Contains(baseName, ".") { + if file.Name == nil { + return "", errors.New("file name is nil") + } + packageName, _ = getFieldType(file, file.Name, genericParamTypeDefs) + } + + return strings.TrimLeft(fmt.Sprintf("%s.%s", packageName, fullName), "."), nil +} + +func getGenericTypeName(file *ast.File, field ast.Expr) (string, error) { + switch fieldType := field.(type) { + case *ast.Ident: + if fieldType.Obj == nil { + return fieldType.Name, nil + } + + tSpec := &TypeSpecDef{ + File: file, + TypeSpec: fieldType.Obj.Decl.(*ast.TypeSpec), + PkgPath: file.Name.Name, + } + return tSpec.TypeName(), nil + case *ast.ArrayType: + tSpec := &TypeSpecDef{ + File: file, + TypeSpec: fieldType.Elt.(*ast.Ident).Obj.Decl.(*ast.TypeSpec), + PkgPath: file.Name.Name, + } + return tSpec.TypeName(), nil + case *ast.SelectorExpr: + return fmt.Sprintf("%s.%s", fieldType.X.(*ast.Ident).Name, fieldType.Sel.Name), nil + } + return "", fmt.Errorf("unknown type %#v", field) +} + +func (parser *Parser) parseGenericTypeExpr(file *ast.File, typeExpr ast.Expr) (*spec.Schema, error) { + switch expr := typeExpr.(type) { + // suppress debug messages for these types + case *ast.InterfaceType: + case *ast.StructType: + case *ast.Ident: + case *ast.StarExpr: + case *ast.SelectorExpr: + case *ast.ArrayType: + case *ast.MapType: + case *ast.FuncType: + case *ast.IndexExpr, *ast.IndexListExpr: + name, err := getExtendedGenericFieldType(file, expr, nil) + if err == nil { + if schema, err := parser.getTypeSchema(name, file, false); err == nil { + return schema, nil + } + } + + parser.debug.Printf("Type 
definition of type '%T' is not supported yet. Using 'object' instead. (%s)\n", typeExpr, err) + default: + parser.debug.Printf("Type definition of type '%T' is not supported yet. Using 'object' instead.\n", typeExpr) + } + + return PrimitiveSchema(OBJECT), nil +} diff --git a/vendor/github.com/swaggo/swag/v2/genericsv3.go b/vendor/github.com/swaggo/swag/v2/genericsv3.go new file mode 100644 index 00000000..1306a362 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/genericsv3.go @@ -0,0 +1,34 @@ +package swag + +import ( + "go/ast" + + "github.com/sv-tools/openapi/spec" +) + +func (p *Parser) parseGenericTypeExprV3(file *ast.File, typeExpr ast.Expr) (*spec.RefOrSpec[spec.Schema], error) { + switch expr := typeExpr.(type) { + // suppress debug messages for these types + case *ast.InterfaceType: + case *ast.StructType: + case *ast.Ident: + case *ast.StarExpr: + case *ast.SelectorExpr: + case *ast.ArrayType: + case *ast.MapType: + case *ast.FuncType: + case *ast.IndexExpr, *ast.IndexListExpr: + name, err := getExtendedGenericFieldType(file, expr, nil) + if err == nil { + if schema, err := p.getTypeSchemaV3(name, file, false); err == nil { + return schema, nil + } + } + + p.debug.Printf("Type definition of type '%T' is not supported yet. Using 'object' instead. (%s)\n", typeExpr, err) + default: + p.debug.Printf("Type definition of type '%T' is not supported yet. 
Using 'object' instead.\n", typeExpr) + } + + return PrimitiveSchemaV3(OBJECT), nil +} diff --git a/vendor/github.com/swaggo/swag/v2/golist.go b/vendor/github.com/swaggo/swag/v2/golist.go new file mode 100644 index 00000000..fa0b2cd9 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/golist.go @@ -0,0 +1,78 @@ +package swag + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/build" + "os/exec" + "path/filepath" +) + +func listPackages(ctx context.Context, dir string, env []string, args ...string) (pkgs []*build.Package, finalErr error) { + cmd := exec.CommandContext(ctx, "go", append([]string{"list", "-json", "-e"}, args...)...) + cmd.Env = env + cmd.Dir = dir + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + var stderrBuf bytes.Buffer + cmd.Stderr = &stderrBuf + defer func() { + if stderrBuf.Len() > 0 { + finalErr = fmt.Errorf("%v\n%s", finalErr, stderrBuf.Bytes()) + } + }() + + err = cmd.Start() + if err != nil { + return nil, err + } + dec := json.NewDecoder(stdout) + for dec.More() { + var pkg build.Package + err = dec.Decode(&pkg) + if err != nil { + return nil, err + } + pkgs = append(pkgs, &pkg) + } + err = cmd.Wait() + if err != nil { + return nil, err + } + return pkgs, nil +} + +func (parser *Parser) getAllGoFileInfoFromDepsByList(pkg *build.Package, parseFlag ParseFlag) error { + ignoreInternal := pkg.Goroot && !parser.ParseInternal + if ignoreInternal { // ignored internal + return nil + } + + if parser.skipPackageByPrefix(pkg.ImportPath) { + return nil // ignored by user-defined package path prefixes + } + + srcDir := pkg.Dir + var err error + for i := range pkg.GoFiles { + err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.GoFiles[i]), nil, parseFlag) + if err != nil { + return err + } + } + + // parse .go source files that import "C" + for i := range pkg.CgoFiles { + err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.CgoFiles[i]), nil, parseFlag) + if err != nil { + return err + 
} + } + + return nil +} diff --git a/vendor/github.com/swaggo/swag/v2/license b/vendor/github.com/swaggo/swag/v2/license new file mode 100644 index 00000000..a97865bf --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/license @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Eason Lin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/swaggo/swag/v2/operation.go b/vendor/github.com/swaggo/swag/v2/operation.go new file mode 100644 index 00000000..12fd7a09 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/operation.go @@ -0,0 +1,1266 @@ +package swag + +import ( + "encoding/json" + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "log" + "net/http" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/go-openapi/spec" + "golang.org/x/tools/go/loader" + "gopkg.in/yaml.v2" +) + +// RouteProperties describes HTTP properties of a single router comment. 
+type RouteProperties struct { + HTTPMethod string + Path string + Deprecated bool +} + +// Operation describes a single API operation on a path. +// For more information: https://github.com/swaggo/swag#api-operation +type Operation struct { + parser *Parser + codeExampleFilesDir string + spec.Operation + RouterProperties []RouteProperties + State string +} + +var mimeTypeAliases = map[string]string{ + "json": "application/json", + "xml": "text/xml", + "plain": "text/plain", + "html": "text/html", + "mpfd": "multipart/form-data", + "x-www-form-urlencoded": "application/x-www-form-urlencoded", + "json-api": "application/vnd.api+json", + "json-stream": "application/x-json-stream", + "octet-stream": "application/octet-stream", + "png": "image/png", + "jpeg": "image/jpeg", + "gif": "image/gif", +} + +var mimeTypePattern = regexp.MustCompile("^[^/]+/[^/]+$") + +// NewOperation creates a new Operation with default properties. +// map[int]Response. +func NewOperation(parser *Parser, options ...func(*Operation)) *Operation { + if parser == nil { + parser = New() + } + + result := &Operation{ + parser: parser, + RouterProperties: []RouteProperties{}, + Operation: spec.Operation{ + OperationProps: spec.OperationProps{ + ID: "", + Description: "", + Summary: "", + Security: nil, + ExternalDocs: nil, + Deprecated: false, + Tags: []string{}, + Consumes: []string{}, + Produces: []string{}, + Schemes: []string{}, + Parameters: []spec.Parameter{}, + Responses: &spec.Responses{ + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, + }, + ResponsesProps: spec.ResponsesProps{ + Default: nil, + StatusCodeResponses: make(map[int]spec.Response), + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, + }, + }, + codeExampleFilesDir: "", + } + + for _, option := range options { + option(result) + } + + return result +} + +// SetCodeExampleFilesDirectory sets the directory to search for codeExamples. 
+func SetCodeExampleFilesDirectory(directoryPath string) func(*Operation) { + return func(o *Operation) { + o.codeExampleFilesDir = directoryPath + } +} + +// ParseComment parses comment for given comment string and returns error if error occurs. +func (operation *Operation) ParseComment(comment string, astFile *ast.File) error { + commentLine := strings.TrimSpace(strings.TrimLeft(comment, "/")) + if len(commentLine) == 0 { + return nil + } + + fields := FieldsByAnySpace(commentLine, 2) + attribute := fields[0] + lowerAttribute := strings.ToLower(attribute) + var lineRemainder string + if len(fields) > 1 { + lineRemainder = fields[1] + } + switch lowerAttribute { + case stateAttr: + operation.ParseStateComment(lineRemainder) + case descriptionAttr: + operation.ParseDescriptionComment(lineRemainder) + case descriptionMarkdownAttr: + commentInfo, err := getMarkdownForTag(lineRemainder, operation.parser.markdownFileDir) + if err != nil { + return err + } + + operation.ParseDescriptionComment(string(commentInfo)) + case summaryAttr: + operation.Summary = lineRemainder + case idAttr: + operation.ID = lineRemainder + case tagsAttr: + operation.ParseTagsComment(lineRemainder) + case acceptAttr: + return operation.ParseAcceptComment(lineRemainder) + case produceAttr: + return operation.ParseProduceComment(lineRemainder) + case paramAttr: + return operation.ParseParamComment(lineRemainder, astFile) + case successAttr, failureAttr, responseAttr: + return operation.ParseResponseComment(lineRemainder, astFile) + case headerAttr: + return operation.ParseResponseHeaderComment(lineRemainder, astFile) + case routerAttr: + return operation.ParseRouterComment(lineRemainder, false) + case deprecatedRouterAttr: + return operation.ParseRouterComment(lineRemainder, true) + case securityAttr: + return operation.ParseSecurityComment(lineRemainder) + case deprecatedAttr: + operation.Deprecate() + case xCodeSamplesAttr: + return operation.ParseCodeSample(attribute, commentLine, 
lineRemainder) + default: + return operation.ParseMetadata(attribute, lowerAttribute, lineRemainder) + } + + return nil +} + +// ParseCodeSample parse code sample. +func (operation *Operation) ParseCodeSample(attribute, _, lineRemainder string) error { + log.Println("line remainder:", lineRemainder) + + if lineRemainder == "file" { + log.Println("line remainder is file") + + data, isJSON, err := getCodeExampleForSummary(operation.Summary, operation.codeExampleFilesDir) + if err != nil { + return err + } + + var valueJSON interface{} + + if isJSON { + err = json.Unmarshal(data, &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value. error: %s", attribute, err.Error()) + } + } else { + err = yaml.Unmarshal(data, &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid yaml value. error: %s", attribute, err.Error()) + } + } + + // don't use the method provided by spec lib, because it will call toLower() on attribute names, which is wrongly + operation.Extensions[attribute[1:]] = valueJSON + + return nil + } + + // Fallback into existing logic + return operation.ParseMetadata(attribute, strings.ToLower(attribute), lineRemainder) +} + +// ParseStateComment parse state comment. +func (operation *Operation) ParseStateComment(lineRemainder string) { + operation.State = lineRemainder +} + +// ParseDescriptionComment parse description comment. +func (operation *Operation) ParseDescriptionComment(lineRemainder string) { + if operation.Description == "" { + operation.Description = lineRemainder + + return + } + + operation.Description += "\n" + lineRemainder +} + +// ParseMetadata parse metadata. 
+func (operation *Operation) ParseMetadata(attribute, lowerAttribute, lineRemainder string) error { + // parsing specific meta data extensions + if strings.HasPrefix(lowerAttribute, "@x-") { + if len(lineRemainder) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + var valueJSON interface{} + + err := json.Unmarshal([]byte(lineRemainder), &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value. error: %s", attribute, err.Error()) + } + + // don't use the method provided by spec lib, because it will call toLower() on attribute names, which is wrongly + operation.Extensions[attribute[1:]] = valueJSON + } + + return nil +} + +var paramPattern = regexp.MustCompile(`(\S+)\s+(\w+)\s+([\S. ]+?)\s+(\w+)\s+"([^"]+)"`) + +func findInSlice(arr []string, target string) bool { + for _, str := range arr { + if str == target { + return true + } + } + + return false +} + +// ParseParamComment parses params return []string of param properties +// E.g. @Param queryText formData string true "The email for login" +// +// [param name] [paramType] [data type] [is mandatory?] [Comment] +// +// E.g. @Param some_id path int true "Some ID". 
+func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.File) error { + matches := paramPattern.FindStringSubmatch(commentLine) + if len(matches) != 6 { + return fmt.Errorf("missing required param comment parameters \"%s\"", commentLine) + } + + name := matches[1] + paramType := matches[2] + refType := TransToValidSchemeType(matches[3]) + + // Detect refType + objectType := OBJECT + + if strings.HasPrefix(refType, "[]") { + objectType = ARRAY + refType = strings.TrimPrefix(refType, "[]") + refType = TransToValidSchemeType(refType) + } else if IsPrimitiveType(refType) || + paramType == "formData" && refType == "file" { + objectType = PRIMITIVE + } + + var enums []interface{} + if !IsPrimitiveType(refType) { + schema, _ := operation.parser.getTypeSchema(refType, astFile, false) + if schema != nil && len(schema.Type) == 1 && schema.Enum != nil { + if objectType == OBJECT { + objectType = PRIMITIVE + } + refType = TransToValidSchemeType(schema.Type[0]) + enums = schema.Enum + } + } + + requiredText := strings.ToLower(matches[4]) + required := requiredText == "true" || requiredText == requiredLabel + description := strings.Join(strings.Split(matches[5], "\\n"), "\n") + + param := createParameter(paramType, description, name, objectType, refType, required, enums, operation.parser.collectionFormatInQuery) + + switch paramType { + case "path", "header", "query", "formData": + switch objectType { + case ARRAY: + if !IsPrimitiveType(refType) && !(refType == "file" && paramType == "formData") { + return fmt.Errorf("%s is not supported array type for %s", refType, paramType) + } + case PRIMITIVE: + break + case OBJECT: + schema, err := operation.parser.getTypeSchema(refType, astFile, false) + if err != nil { + return err + } + + if len(schema.Properties) == 0 { + return nil + } + + items := schema.Properties.ToOrderedSchemaItems() + + for _, item := range items { + name, prop := item.Name, &item.Schema + if len(prop.Type) == 0 { + prop = 
operation.parser.getUnderlyingSchema(prop) + if len(prop.Type) == 0 { + continue + } + } + + nameOverrideType := paramType + // query also uses formData tags + if paramType == "query" { + nameOverrideType = "formData" + } + // load overridden type specific name from extensions if exists + if nameVal, ok := item.Schema.Extensions[nameOverrideType]; ok { + name = nameVal.(string) + } + + switch { + case prop.Type[0] == ARRAY: + if prop.Items.Schema == nil { + continue + } + itemSchema := prop.Items.Schema + if len(itemSchema.Type) == 0 { + itemSchema = operation.parser.getUnderlyingSchema(prop.Items.Schema) + } + if itemSchema == nil { + continue + } + if len(itemSchema.Type) == 0 { + continue + } + if !IsSimplePrimitiveType(itemSchema.Type[0]) { + continue + } + param = createParameter(paramType, prop.Description, name, prop.Type[0], itemSchema.Type[0], findInSlice(schema.Required, item.Name), itemSchema.Enum, operation.parser.collectionFormatInQuery) + + case IsSimplePrimitiveType(prop.Type[0]): + param = createParameter(paramType, prop.Description, name, PRIMITIVE, prop.Type[0], findInSlice(schema.Required, item.Name), nil, operation.parser.collectionFormatInQuery) + default: + operation.parser.debug.Printf("skip field [%s] in %s is not supported type for %s", name, refType, paramType) + continue + } + + param.Nullable = prop.Nullable + param.Format = prop.Format + param.Default = prop.Default + param.Example = prop.Example + param.Extensions = prop.Extensions + param.CommonValidations.Maximum = prop.Maximum + param.CommonValidations.Minimum = prop.Minimum + param.CommonValidations.ExclusiveMaximum = prop.ExclusiveMaximum + param.CommonValidations.ExclusiveMinimum = prop.ExclusiveMinimum + param.CommonValidations.MaxLength = prop.MaxLength + param.CommonValidations.MinLength = prop.MinLength + param.CommonValidations.Pattern = prop.Pattern + param.CommonValidations.MaxItems = prop.MaxItems + param.CommonValidations.MinItems = prop.MinItems + 
param.CommonValidations.UniqueItems = prop.UniqueItems + param.CommonValidations.MultipleOf = prop.MultipleOf + param.CommonValidations.Enum = prop.Enum + operation.Operation.Parameters = append(operation.Operation.Parameters, param) + } + + return nil + } + case "body": + if objectType == PRIMITIVE { + param.Schema = PrimitiveSchema(refType) + } else { + schema, err := operation.parseAPIObjectSchema(commentLine, objectType, refType, astFile) + if err != nil { + return err + } + + param.Schema = schema + } + default: + return fmt.Errorf("%s is not supported paramType", paramType) + } + + err := operation.parseParamAttribute(commentLine, objectType, refType, paramType, ¶m) + + if err != nil { + return err + } + + operation.Operation.Parameters = append(operation.Operation.Parameters, param) + + return nil +} + +const ( + formTag = "form" + jsonTag = "json" + uriTag = "uri" + headerTag = "header" + bindingTag = "binding" + defaultTag = "default" + enumsTag = "enums" + exampleTag = "example" + schemaExampleTag = "schemaExample" + formatTag = "format" + titleTag = "title" + validateTag = "validate" + minimumTag = "minimum" + maximumTag = "maximum" + minLengthTag = "minLength" + maxLengthTag = "maxLength" + multipleOfTag = "multipleOf" + readOnlyTag = "readonly" + extensionsTag = "extensions" + collectionFormatTag = "collectionFormat" + patternTag = "pattern" + oneOfTag = "oneOf" +) + +var regexAttributes = map[string]*regexp.Regexp{ + // for Enums(A, B) + enumsTag: regexp.MustCompile(`(?i)\s+enums\(.*\)`), + // for maximum(0) + maximumTag: regexp.MustCompile(`(?i)\s+maxinum|maximum\(.*\)`), + // for minimum(0) + minimumTag: regexp.MustCompile(`(?i)\s+mininum|minimum\(.*\)`), + // for default(0) + defaultTag: regexp.MustCompile(`(?i)\s+default\(.*\)`), + // for minlength(0) + minLengthTag: regexp.MustCompile(`(?i)\s+minlength\(.*\)`), + // for maxlength(0) + maxLengthTag: regexp.MustCompile(`(?i)\s+maxlength\(.*\)`), + // for format(email) + formatTag: 
regexp.MustCompile(`(?i)\s+format\(.*\)`), + // for extensions(x-example=test) + extensionsTag: regexp.MustCompile(`(?i)\s+extensions\(.*\)`), + // for collectionFormat(csv) + collectionFormatTag: regexp.MustCompile(`(?i)\s+collectionFormat\(.*\)`), + // example(0) + exampleTag: regexp.MustCompile(`(?i)\s+example\(.*\)`), + // schemaExample(0) + schemaExampleTag: regexp.MustCompile(`(?i)\s+schemaExample\(.*\)`), +} + +func (operation *Operation) parseParamAttribute(comment, objectType, schemaType, paramType string, param *spec.Parameter) error { + schemaType = TransToValidSchemeType(schemaType) + + for attrKey, re := range regexAttributes { + attr, err := findAttr(re, comment) + if err != nil { + continue + } + + switch attrKey { + case enumsTag: + err = setEnumParam(param, attr, objectType, schemaType, paramType) + case minimumTag, maximumTag: + err = setNumberParam(param, attrKey, schemaType, attr, comment) + case defaultTag: + err = setDefault(param, schemaType, attr) + case minLengthTag, maxLengthTag: + err = setStringParam(param, attrKey, schemaType, attr, comment) + case formatTag: + param.Format = attr + case exampleTag: + err = setExample(param, schemaType, attr) + case schemaExampleTag: + err = setSchemaExample(param, schemaType, attr) + case extensionsTag: + param.Extensions = setExtensionParam(attr) + case collectionFormatTag: + err = setCollectionFormatParam(param, attrKey, objectType, attr, comment) + } + + if err != nil { + return err + } + } + + return nil +} + +func findAttr(re *regexp.Regexp, commentLine string) (string, error) { + attr := re.FindString(commentLine) + + l, r := strings.Index(attr, "("), strings.Index(attr, ")") + if l == -1 || r == -1 { + return "", fmt.Errorf("can not find regex=%s, comment=%s", re.String(), commentLine) + } + + return strings.TrimSpace(attr[l+1 : r]), nil +} + +func setStringParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType != STRING { + return fmt.Errorf("%s is 
attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) + } + + n, err := strconv.ParseInt(attr, 10, 64) + if err != nil { + return fmt.Errorf("%s is allow only a number got=%s", name, attr) + } + + switch name { + case minLengthTag: + param.MinLength = &n + case maxLengthTag: + param.MaxLength = &n + } + + return nil +} + +func setNumberParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + switch schemaType { + case INTEGER, NUMBER: + n, err := strconv.ParseFloat(attr, 64) + if err != nil { + return fmt.Errorf("maximum is allow only a number. comment=%s got=%s", commentLine, attr) + } + + switch name { + case minimumTag: + param.Minimum = &n + case maximumTag: + param.Maximum = &n + } + + return nil + default: + return fmt.Errorf("%s is attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) + } +} + +func setEnumParam(param *spec.Parameter, attr, objectType, schemaType, paramType string) error { + for _, e := range strings.Split(attr, ",") { + e = strings.TrimSpace(e) + + value, err := defineType(schemaType, e) + if err != nil { + return err + } + + switch objectType { + case ARRAY: + param.Items.Enum = append(param.Items.Enum, value) + default: + switch paramType { + case "body": + param.Schema.Enum = append(param.Schema.Enum, value) + default: + param.Enum = append(param.Enum, value) + } + } + } + + return nil +} + +func setExtensionParam(attr string) spec.Extensions { + extensions := spec.Extensions{} + + for _, val := range splitNotWrapped(attr, ',') { + parts := strings.SplitN(val, "=", 2) + if len(parts) == 2 { + extensions.Add(parts[0], parts[1]) + + continue + } + + if len(parts[0]) > 0 && string(parts[0][0]) == "!" 
{ + extensions.Add(parts[0][1:], false) + + continue + } + + extensions.Add(parts[0], true) + } + + return extensions +} + +func setCollectionFormatParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType == ARRAY { + param.CollectionFormat = TransToValidCollectionFormat(attr) + + return nil + } + + return fmt.Errorf("%s is attribute to set to an array. comment=%s got=%s", name, commentLine, schemaType) +} + +func setDefault(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a default value if it's not valid + } + + param.Default = val + + return nil +} + +func setSchemaExample(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a example value if it's not valid + } + // skip schema + if param.Schema == nil { + return nil + } + + switch v := val.(type) { + case string: + // replaces \r \n \t in example string values. + param.Schema.Example = strings.NewReplacer(`\r`, "\r", `\n`, "\n", `\t`, "\t").Replace(v) + default: + param.Schema.Example = val + } + + return nil +} + +func setExample(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a example value if it's not valid + } + + param.Example = val + + return nil +} + +// defineType enum value define the type (object and array unsupported). 
+func defineType(schemaType string, value string) (v interface{}, err error) { + schemaType = TransToValidSchemeType(schemaType) + + switch schemaType { + case STRING: + return value, nil + case NUMBER: + v, err = strconv.ParseFloat(value, 64) + if err != nil { + return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) + } + case INTEGER: + v, err = strconv.Atoi(value) + if err != nil { + return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) + } + case BOOLEAN: + v, err = strconv.ParseBool(value) + if err != nil { + return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) + } + default: + return nil, fmt.Errorf("%s is unsupported type in enum value %s", schemaType, value) + } + + return v, nil +} + +// ParseTagsComment parses comment for given `tag` comment string. +func (operation *Operation) ParseTagsComment(commentLine string) { + for _, tag := range strings.Split(commentLine, ",") { + operation.Tags = append(operation.Tags, strings.TrimSpace(tag)) + } +} + +// ParseAcceptComment parses comment for given `accept` comment string. +func (operation *Operation) ParseAcceptComment(commentLine string) error { + return parseMimeTypeList(commentLine, &operation.Consumes, "%v accept type can't be accepted") +} + +// ParseProduceComment parses comment for given `produce` comment string. +func (operation *Operation) ParseProduceComment(commentLine string) error { + return parseMimeTypeList(commentLine, &operation.Produces, "%v produce type can't be accepted") +} + +// parseMimeTypeList parses a list of MIME Types for a comment like +// `produce` (`Content-Type:` response header) or +// `accept` (`Accept:` request header). 
+func parseMimeTypeList(mimeTypeList string, typeList *[]string, format string) error { + for _, typeName := range strings.Split(mimeTypeList, ",") { + if mimeTypePattern.MatchString(typeName) { + *typeList = append(*typeList, typeName) + + continue + } + + aliasMimeType, ok := mimeTypeAliases[typeName] + if !ok { + return fmt.Errorf(format, typeName) + } + + *typeList = append(*typeList, aliasMimeType) + } + + return nil +} + +var routerPattern = regexp.MustCompile(`^(/[\w./\-{}\(\)+:$]*)[[:blank:]]+\[(\w+)]`) + +// ParseRouterComment parses comment for given `router` comment string. +func (operation *Operation) ParseRouterComment(commentLine string, deprecated bool) error { + matches := routerPattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { + return fmt.Errorf("can not parse router comment \"%s\"", commentLine) + } + + signature := RouteProperties{ + Path: matches[1], + HTTPMethod: strings.ToUpper(matches[2]), + Deprecated: deprecated, + } + + if _, ok := allMethod[signature.HTTPMethod]; !ok { + return fmt.Errorf("invalid method: %s", signature.HTTPMethod) + } + + operation.RouterProperties = append(operation.RouterProperties, signature) + + return nil +} + +// ParseSecurityComment parses comment for given `security` comment string. 
+func (operation *Operation) ParseSecurityComment(commentLine string) error { + if len(commentLine) == 0 { + operation.Security = []map[string][]string{} + return nil + } + + var ( + securityMap = make(map[string][]string) + securitySource = commentLine[strings.Index(commentLine, "@Security")+1:] + ) + + for _, securityOption := range strings.Split(securitySource, "||") { + securityOption = strings.TrimSpace(securityOption) + + left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") + + if !(left == -1 && right == -1) { + scopes := securityOption[left+1 : right] + + var options []string + + for _, scope := range strings.Split(scopes, ",") { + options = append(options, strings.TrimSpace(scope)) + } + + securityKey := securityOption[0:left] + securityMap[securityKey] = append(securityMap[securityKey], options...) + } else { + securityKey := strings.TrimSpace(securityOption) + securityMap[securityKey] = []string{} + } + } + + operation.Security = append(operation.Security, securityMap) + + return nil +} + +// findTypeDef attempts to find the *ast.TypeSpec for a specific type given the +// type's name and the package's import path. +// TODO: improve finding external pkg. 
+func findTypeDef(importPath, typeName string) (*ast.TypeSpec, error) { + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + + conf := loader.Config{ + ParserMode: goparser.SpuriousErrors, + Cwd: cwd, + } + + conf.Import(importPath) + + lprog, err := conf.Load() + if err != nil { + return nil, err + } + + // If the pkg is vendored, the actual pkg path is going to resemble + // something like "{importPath}/vendor/{importPath}" + for k := range lprog.AllPackages { + realPkgPath := k.Path() + + if strings.Contains(realPkgPath, "vendor/"+importPath) { + importPath = realPkgPath + } + } + + pkgInfo := lprog.Package(importPath) + + if pkgInfo == nil { + return nil, fmt.Errorf("package was nil") + } + + // TODO: possibly cache pkgInfo since it's an expensive operation + for i := range pkgInfo.Files { + for _, astDeclaration := range pkgInfo.Files[i].Decls { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if ok && generalDeclaration.Tok == token.TYPE { + for _, astSpec := range generalDeclaration.Specs { + typeSpec, ok := astSpec.(*ast.TypeSpec) + if ok { + if typeSpec.Name.String() == typeName { + return typeSpec, nil + } + } + } + } + } + } + + return nil, fmt.Errorf("type spec not found") +} + +var responsePattern = regexp.MustCompile(`^([\w,]+)\s+([\w{}]+)\s+([\w\-.\\{}=,\[\s\]]+)\s*(".*)?`) + +// ResponseType{data1=Type1,data2=Type2}. 
+var combinedPattern = regexp.MustCompile(`^([\w\-./\[\]]+){(.*)}$`) + +func (operation *Operation) parseObjectSchema(refType string, astFile *ast.File) (*spec.Schema, error) { + return parseObjectSchema(operation.parser, refType, astFile) +} + +func parseObjectSchema(parser *Parser, refType string, astFile *ast.File) (*spec.Schema, error) { + switch { + case refType == NIL: + return nil, nil + case refType == INTERFACE: + return &spec.Schema{}, nil + case refType == ANY: + return &spec.Schema{}, nil + case IsGolangPrimitiveType(refType): + refType = TransToValidSchemeType(refType) + + return PrimitiveSchema(refType), nil + case IsPrimitiveType(refType): + return PrimitiveSchema(refType), nil + case strings.HasPrefix(refType, "[]"): + schema, err := parseObjectSchema(parser, refType[2:], astFile) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(schema), nil + case strings.HasPrefix(refType, "map["): + // ignore key type + idx := strings.Index(refType, "]") + if idx < 0 { + return nil, fmt.Errorf("invalid type: %s", refType) + } + + refType = refType[idx+1:] + if refType == INTERFACE || refType == ANY { + return spec.MapProperty(nil), nil + } + + schema, err := parseObjectSchema(parser, refType, astFile) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil + case strings.Contains(refType, "{"): + return parseCombinedObjectSchema(parser, refType, astFile) + default: + if parser != nil { // checking refType has existing in 'TypeDefinitions' + schema, err := parser.getTypeSchema(refType, astFile, true) + if err != nil { + return nil, err + } + + return schema, nil + } + + return RefSchema(refType), nil + } +} + +func parseFields(s string) []string { + nestLevel := 0 + + return strings.FieldsFunc(s, func(char rune) bool { + if char == '{' { + nestLevel++ + + return false + } else if char == '}' { + nestLevel-- + + return false + } + + return char == ',' && nestLevel == 0 + }) +} + +func parseCombinedObjectSchema(parser 
*Parser, refType string, astFile *ast.File) (*spec.Schema, error) { + matches := combinedPattern.FindStringSubmatch(refType) + if len(matches) != 3 { + return nil, fmt.Errorf("invalid type: %s", refType) + } + + schema, err := parseObjectSchema(parser, matches[1], astFile) + if err != nil { + return nil, err + } + + fields, props := parseFields(matches[2]), map[string]spec.Schema{} + + for _, field := range fields { + keyVal := strings.SplitN(field, "=", 2) + if len(keyVal) == 2 { + schema, err := parseObjectSchema(parser, keyVal[1], astFile) + if err != nil { + return nil, err + } + + if schema == nil { + schema = PrimitiveSchema(OBJECT) + } + + props[keyVal[0]] = *schema + } + } + + if len(props) == 0 { + return schema, nil + } + + if schema.Ref.GetURL() == nil && len(schema.Type) > 0 && schema.Type[0] == OBJECT && len(schema.Properties) == 0 && schema.AdditionalProperties == nil { + schema.Properties = props + return schema, nil + } + + return spec.ComposedSchema(*schema, spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{OBJECT}, + Properties: props, + }, + }), nil +} + +func (operation *Operation) parseAPIObjectSchema(commentLine, schemaType, refType string, astFile *ast.File) (*spec.Schema, error) { + if strings.HasSuffix(refType, ",") && strings.Contains(refType, "[") { + // regexp may have broken generic syntax. 
find closing bracket and add it back + allMatchesLenOffset := strings.Index(commentLine, refType) + len(refType) + lostPartEndIdx := strings.Index(commentLine[allMatchesLenOffset:], "]") + if lostPartEndIdx >= 0 { + refType += commentLine[allMatchesLenOffset : allMatchesLenOffset+lostPartEndIdx+1] + } + } + + switch schemaType { + case OBJECT: + if !strings.HasPrefix(refType, "[]") { + return operation.parseObjectSchema(refType, astFile) + } + + refType = refType[2:] + + fallthrough + case ARRAY: + schema, err := operation.parseObjectSchema(refType, astFile) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(schema), nil + default: + return PrimitiveSchema(schemaType), nil + } +} + +// ParseResponseComment parses comment for given `response` comment string. +func (operation *Operation) ParseResponseComment(commentLine string, astFile *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { + err := operation.ParseEmptyResponseComment(commentLine) + if err != nil { + return operation.ParseEmptyResponseOnly(commentLine) + } + + return err + } + + description := strings.Trim(matches[4], "\"") + + schema, err := operation.parseAPIObjectSchema(commentLine, strings.Trim(matches[2], "{}"), strings.TrimSpace(matches[3]), astFile) + if err != nil { + return err + } + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + operation.DefaultResponse().WithSchema(schema).WithDescription(description) + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + resp := spec.NewResponse().WithSchema(schema).WithDescription(description) + if description == "" { + resp.WithDescription(http.StatusText(code)) + } + + operation.AddResponse(code, resp) + } + + return nil +} + +func newHeaderSpec(schemaType, description string) spec.Header { + return spec.Header{ + 
SimpleSchema: spec.SimpleSchema{ + Type: schemaType, + }, + HeaderProps: spec.HeaderProps{ + Description: description, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: nil, + }, + CommonValidations: spec.CommonValidations{ + Maximum: nil, + ExclusiveMaximum: false, + Minimum: nil, + ExclusiveMinimum: false, + MaxLength: nil, + MinLength: nil, + Pattern: "", + MaxItems: nil, + MinItems: nil, + UniqueItems: false, + MultipleOf: nil, + Enum: nil, + }, + } +} + +// ParseResponseHeaderComment parses comment for given `response header` comment string. +func (operation *Operation) ParseResponseHeaderComment(commentLine string, _ *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + header := newHeaderSpec(strings.Trim(matches[2], "{}"), strings.Trim(matches[4], "\"")) + + headerKey := strings.TrimSpace(matches[3]) + + if strings.EqualFold(matches[1], "all") { + if operation.Responses.Default != nil { + operation.Responses.Default.Headers[headerKey] = header + } + + if operation.Responses.StatusCodeResponses != nil { + for code, response := range operation.Responses.StatusCodeResponses { + response.Headers[headerKey] = header + operation.Responses.StatusCodeResponses[code] = response + } + } + + return nil + } + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + if operation.Responses.Default != nil { + operation.Responses.Default.Headers[headerKey] = header + } + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + if operation.Responses.StatusCodeResponses != nil { + response, responseExist := operation.Responses.StatusCodeResponses[code] + if responseExist { + response.Headers[headerKey] = header + + operation.Responses.StatusCodeResponses[code] = response + } + } + } + + 
return nil +} + +var emptyResponsePattern = regexp.MustCompile(`([\w,]+)\s+"(.*)"`) + +// ParseEmptyResponseComment parse only comment out status code and description,eg: @Success 200 "it's ok". +func (operation *Operation) ParseEmptyResponseComment(commentLine string) error { + matches := emptyResponsePattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + description := strings.Trim(matches[2], "\"") + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + operation.DefaultResponse().WithDescription(description) + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + operation.AddResponse(code, spec.NewResponse().WithDescription(description)) + } + + return nil +} + +// ParseEmptyResponseOnly parse only comment out status code ,eg: @Success 200. +func (operation *Operation) ParseEmptyResponseOnly(commentLine string) error { + for _, codeStr := range strings.Split(commentLine, ",") { + if strings.EqualFold(codeStr, defaultTag) { + _ = operation.DefaultResponse() + + continue + } + + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + operation.AddResponse(code, spec.NewResponse().WithDescription(http.StatusText(code))) + } + + return nil +} + +// DefaultResponse return the default response member pointer. +func (operation *Operation) DefaultResponse() *spec.Response { + if operation.Responses.Default == nil { + operation.Responses.Default = &spec.Response{ + ResponseProps: spec.ResponseProps{ + Description: "", + Headers: make(map[string]spec.Header), + }, + } + } + + return operation.Responses.Default +} + +// AddResponse add a response for a code. 
+func (operation *Operation) AddResponse(code int, response *spec.Response) { + if response.Headers == nil { + response.Headers = make(map[string]spec.Header) + } + + operation.Responses.StatusCodeResponses[code] = *response +} + +// createParameter returns swagger spec.Parameter for given paramType, description, paramName, schemaType, required. +func createParameter(paramType, description, paramName, objectType, schemaType string, required bool, enums []interface{}, collectionFormat string) spec.Parameter { + // //five possible parameter types. query, path, body, header, form + result := spec.Parameter{ + ParamProps: spec.ParamProps{ + Name: paramName, + Description: description, + Required: required, + In: paramType, + }, + } + + if paramType == "body" { + return result + } + + switch objectType { + case ARRAY: + result.Type = objectType + result.CollectionFormat = collectionFormat + result.Items = &spec.Items{ + CommonValidations: spec.CommonValidations{ + Enum: enums, + }, + SimpleSchema: spec.SimpleSchema{ + Type: schemaType, + }, + } + case PRIMITIVE, OBJECT: + result.Type = schemaType + result.Enum = enums + } + return result +} + +func getCodeExampleForSummary(summaryName string, dirPath string) ([]byte, bool, error) { + dirEntries, err := os.ReadDir(dirPath) + if err != nil { + return nil, false, err + } + + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + + fileName := entry.Name() + + isJSON := strings.Contains(fileName, ".json") + isYaml := strings.Contains(fileName, ".yaml") + if !isJSON && !isYaml { + continue + } + + if strings.Contains(fileName, summaryName) { + fullPath := filepath.Join(dirPath, fileName) + + commentInfo, err := os.ReadFile(fullPath) + if err != nil { + return nil, false, fmt.Errorf("Failed to read code example file %s error: %s ", fullPath, err) + } + + return commentInfo, isJSON, nil + } + } + + return nil, false, fmt.Errorf("unable to find code example file for tag %s in the given directory", summaryName) 
+} diff --git a/vendor/github.com/swaggo/swag/v2/operationv3.go b/vendor/github.com/swaggo/swag/v2/operationv3.go new file mode 100644 index 00000000..bd65f869 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/operationv3.go @@ -0,0 +1,1234 @@ +package swag + +import ( + "encoding/json" + "fmt" + "go/ast" + "log" + "net/http" + "strconv" + "strings" + + "github.com/pkg/errors" + "github.com/sv-tools/openapi/spec" + "gopkg.in/yaml.v2" +) + +// OperationV3 describes a single API operation on a path. +// For more information: https://github.com/swaggo/swag#api-operation +type OperationV3 struct { + parser *Parser + codeExampleFilesDir string + spec.Operation + RouterProperties []RouteProperties + responseMimeTypes []string +} + +// NewOperationV3 returns a new instance of OperationV3. +func NewOperationV3(parser *Parser, options ...func(*OperationV3)) *OperationV3 { + op := *spec.NewOperation().Spec + op.Responses = spec.NewResponses() + + operation := &OperationV3{ + parser: parser, + Operation: op, + } + + for _, option := range options { + option(operation) + } + + return operation +} + +// SetCodeExampleFilesDirectoryV3 sets the directory to search for codeExamples. +func SetCodeExampleFilesDirectoryV3(directoryPath string) func(*OperationV3) { + return func(o *OperationV3) { + o.codeExampleFilesDir = directoryPath + } +} + +// ParseComment parses comment for given comment string and returns error if error occurs. 
+func (o *OperationV3) ParseComment(comment string, astFile *ast.File) error { + commentLine := strings.TrimSpace(strings.TrimLeft(comment, "/")) + if len(commentLine) == 0 { + return nil + } + + fields := FieldsByAnySpace(commentLine, 2) + attribute := fields[0] + lowerAttribute := strings.ToLower(attribute) + var lineRemainder string + if len(fields) > 1 { + lineRemainder = fields[1] + } + switch lowerAttribute { + case descriptionAttr: + o.ParseDescriptionComment(lineRemainder) + case descriptionMarkdownAttr: + commentInfo, err := getMarkdownForTag(lineRemainder, o.parser.markdownFileDir) + if err != nil { + return err + } + + o.ParseDescriptionComment(string(commentInfo)) + case summaryAttr: + o.Summary = lineRemainder + case idAttr: + o.OperationID = lineRemainder + case tagsAttr: + o.ParseTagsComment(lineRemainder) + case acceptAttr: + return o.ParseAcceptComment(lineRemainder) + case produceAttr: + return o.ParseProduceComment(lineRemainder) + case paramAttr: + return o.ParseParamComment(lineRemainder, astFile) + case successAttr, failureAttr, responseAttr: + return o.ParseResponseComment(lineRemainder, astFile) + case headerAttr: + return o.ParseResponseHeaderComment(lineRemainder, astFile) + case routerAttr: + return o.ParseRouterComment(lineRemainder) + case securityAttr: + return o.ParseSecurityComment(lineRemainder) + case deprecatedAttr: + o.Deprecated = true + case xCodeSamplesAttr, xCodeSamplesAttrOriginal: + return o.ParseCodeSample(attribute, commentLine, lineRemainder) + case "@servers.url": + return o.ParseServerURLComment(lineRemainder) + case "@servers.description": + return o.ParseServerDescriptionComment(lineRemainder) + default: + return o.ParseMetadata(attribute, lowerAttribute, lineRemainder) + } + + return nil +} + +// ParseDescriptionComment parses the description comment and sets it to the operation. 
+func (o *OperationV3) ParseDescriptionComment(lineRemainder string) { + if o.Description == "" { + o.Description = lineRemainder + + return + } + + o.Description += "\n" + lineRemainder +} + +// ParseMetadata godoc. +func (o *OperationV3) ParseMetadata(attribute, lowerAttribute, lineRemainder string) error { + // parsing specific meta data extensions + if strings.HasPrefix(lowerAttribute, "@x-") { + if len(lineRemainder) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + var valueJSON any + + err := json.Unmarshal([]byte(lineRemainder), &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value. error: %s", attribute, err.Error()) + } + + o.Responses.Extensions[attribute[1:]] = valueJSON + return nil + } + + return nil +} + +// ParseTagsComment parses comment for given `tag` comment string. +func (o *OperationV3) ParseTagsComment(commentLine string) { + for _, tag := range strings.Split(commentLine, ",") { + o.Tags = append(o.Tags, strings.TrimSpace(tag)) + } +} + +// ParseAcceptComment parses comment for given `accept` comment string. 
+func (o *OperationV3) ParseAcceptComment(commentLine string) error { + const errMessage = "could not parse accept comment" + + validTypes, err := parseMimeTypeListV3(commentLine, "%v accept type can't be accepted") + if err != nil { + return errors.Wrap(err, errMessage) + } + + if o.RequestBody == nil { + o.RequestBody = spec.NewRequestBodySpec() + } + + if o.RequestBody.Spec.Spec.Content == nil { + o.RequestBody.Spec.Spec.Content = make(map[string]*spec.Extendable[spec.MediaType], len(validTypes)) + } + + for _, value := range validTypes { + // skip correctly setup types like application/json + if o.RequestBody.Spec.Spec.Content[value] != nil { + continue + } + + mediaType := spec.NewMediaType() + schema := spec.NewSchemaSpec() + + switch value { + case "application/json", "multipart/form-data", "text/xml": + schema.Spec.Type = spec.NewSingleOrArray(OBJECT) + case "image/png", + "image/jpeg", + "image/gif", + "application/octet-stream", + "application/pdf", + "application/msexcel", + "application/zip", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "application/vnd.openxmlformats-officedocument.presentationml.presentation": + schema.Spec.Type = spec.NewSingleOrArray(STRING) + schema.Spec.Format = "binary" + default: + schema.Spec.Type = spec.NewSingleOrArray(STRING) + } + + mediaType.Spec.Schema = schema + o.RequestBody.Spec.Spec.Content[value] = mediaType + } + + return nil +} + +// ParseProduceComment parses comment for given `produce` comment string. +func (o *OperationV3) ParseProduceComment(commentLine string) error { + const errMessage = "could not parse produce comment" + + validTypes, err := parseMimeTypeListV3(commentLine, "%v produce type can't be accepted") + if err != nil { + return errors.Wrap(err, errMessage) + } + + o.responseMimeTypes = validTypes + + return nil +} + +// ProcessProduceComment processes the previously parsed produce comment. 
+func (o *OperationV3) ProcessProduceComment() error { + const errMessage = "could not process produce comment" + + if o.Responses == nil { + return nil + } + + for _, value := range o.responseMimeTypes { + if o.Responses.Spec.Response == nil { + o.Responses.Spec.Response = make(map[string]*spec.RefOrSpec[spec.Extendable[spec.Response]], len(o.responseMimeTypes)) + } + + for key, response := range o.Responses.Spec.Response { + code, err := strconv.Atoi(key) + if err != nil { + return errors.Wrap(err, errMessage) + } + + // Status 204 is no content. So we do not need to add content. + if code == 204 { + continue + } + + // As this is a workaround, we need to check if the code is in range. + // The Produce comment is being deprecated soon. + if code < 200 || code > 299 { + continue + } + + // skip correctly setup types like application/json + if response.Spec.Spec.Content[value] != nil { + continue + } + + mediaType := spec.NewMediaType() + schema := spec.NewSchemaSpec() + + switch value { + case "application/json", "multipart/form-data", "text/xml": + schema.Spec.Type = spec.NewSingleOrArray(OBJECT) + case "image/png", + "image/jpeg", + "image/gif", + "application/octet-stream", + "application/pdf", + "application/msexcel", + "application/zip", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "application/vnd.openxmlformats-officedocument.presentationml.presentation": + schema.Spec.Type = spec.NewSingleOrArray(STRING) + schema.Spec.Format = "binary" + default: + schema.Spec.Type = spec.NewSingleOrArray(STRING) + } + + mediaType.Spec.Schema = schema + + if response.Spec.Spec.Content == nil { + response.Spec.Spec.Content = make(map[string]*spec.Extendable[spec.MediaType]) + } + + response.Spec.Spec.Content[value] = mediaType + + } + } + + return nil +} + +// parseMimeTypeList parses a list of MIME Types for a comment like +// `produce` (`Content-Type:` response header) 
or +// `accept` (`Accept:` request header). +func parseMimeTypeListV3(mimeTypeList string, format string) ([]string, error) { + var result []string + for _, typeName := range strings.Split(mimeTypeList, ",") { + typeName = strings.TrimSpace(typeName) + + if mimeTypePattern.MatchString(typeName) { + result = append(result, typeName) + + continue + } + + aliasMimeType, ok := mimeTypeAliases[typeName] + if !ok { + return nil, fmt.Errorf(format, typeName) + } + + result = append(result, aliasMimeType) + } + + return result, nil +} + +// ParseParamComment parses params return []string of param properties +// E.g. @Param queryText formData string true "The email for login" +// +// [param name] [paramType] [data type] [is mandatory?] [Comment] +// +// E.g. @Param some_id path int true "Some ID". +func (o *OperationV3) ParseParamComment(commentLine string, astFile *ast.File) error { + matches := paramPattern.FindStringSubmatch(commentLine) + if len(matches) != 6 { + return fmt.Errorf("missing required param comment parameters \"%s\"", commentLine) + } + + name := matches[1] + paramType := matches[2] + refType := TransToValidSchemeType(matches[3]) + + // Detect refType + objectType := OBJECT + + if strings.HasPrefix(refType, "[]") { + objectType = ARRAY + refType = strings.TrimPrefix(refType, "[]") + refType = TransToValidSchemeType(refType) + } else if IsPrimitiveType(refType) || + paramType == "formData" && refType == "file" { + objectType = PRIMITIVE + } + + var enums []interface{} + if !IsPrimitiveType(refType) { + schema, _ := o.parser.getTypeSchemaV3(refType, astFile, false) + if schema != nil && schema.Spec != nil && schema.Spec.Enum != nil { + // schema.Spec.Type != ARRAY + fmt.Println(schema.Spec.Type) + + if objectType == OBJECT { + objectType = PRIMITIVE + } + refType = TransToValidSchemeType(schema.Spec.Type[0]) + enums = schema.Spec.Enum + } + } + + requiredText := strings.ToLower(matches[4]) + required := requiredText == "true" || requiredText == requiredLabel 
+ description := matches[5] + + param := createParameterV3(paramType, description, name, objectType, refType, required, enums, o.parser.collectionFormatInQuery) + + switch paramType { + case "path", "header": + switch objectType { + case ARRAY: + if !IsPrimitiveType(refType) { + return fmt.Errorf("%s is not supported array type for %s", refType, paramType) + } + case OBJECT: + return fmt.Errorf("%s is not supported type for %s", refType, paramType) + } + case "query": + switch objectType { + case ARRAY: + if !IsPrimitiveType(refType) && !(refType == "file" && paramType == "formData") { + return fmt.Errorf("%s is not supported array type for %s", refType, paramType) + } + case PRIMITIVE: + break + case OBJECT: + schema, err := o.parser.getTypeSchemaV3(refType, astFile, false) + if err != nil { + return err + } + + if len(schema.Spec.Properties) == 0 { + return nil + } + + for name, item := range schema.Spec.Properties { + prop := item.Spec + if len(prop.Type) == 0 { + continue + } + + itemParam := param // Avoid shadowed variable which could cause side effects to o.Operation.Parameters + + switch { + case prop.Type[0] == ARRAY && + prop.Items.Schema != nil && + len(prop.Items.Schema.Spec.Type) > 0 && + IsSimplePrimitiveType(prop.Items.Schema.Spec.Type[0]): + + itemParam = createParameterV3(paramType, prop.Description, name, prop.Type[0], prop.Items.Schema.Spec.Type[0], findInSlice(schema.Spec.Required, name), enums, o.parser.collectionFormatInQuery) + + case IsSimplePrimitiveType(prop.Type[0]): + itemParam = createParameterV3(paramType, prop.Description, name, PRIMITIVE, prop.Type[0], findInSlice(schema.Spec.Required, name), enums, o.parser.collectionFormatInQuery) + default: + o.parser.debug.Printf("skip field [%s] in %s is not supported type for %s", name, refType, paramType) + + continue + } + + itemParam.Schema.Spec = prop + + listItem := &spec.RefOrSpec[spec.Extendable[spec.Parameter]]{ + Spec: &spec.Extendable[spec.Parameter]{ + Spec: &itemParam, + }, + } + + 
o.Operation.Parameters = append(o.Operation.Parameters, listItem) + } + + return nil + } + case "body", "formData": + if objectType == PRIMITIVE { + schema := PrimitiveSchemaV3(refType) + + err := o.parseParamAttributeForBody(commentLine, objectType, refType, schema.Spec) + if err != nil { + return err + } + + o.fillRequestBody(schema, required, description, true, paramType == "formData") + + return nil + + } + + schema, err := o.parseAPIObjectSchema(commentLine, objectType, refType, astFile) + if err != nil { + return err + } + + err = o.parseParamAttributeForBody(commentLine, objectType, refType, schema.Spec) + if err != nil { + return err + } + + o.fillRequestBody(schema, required, description, false, paramType == "formData") + + return nil + + default: + return fmt.Errorf("%s is not supported paramType", paramType) + } + + err := o.parseParamAttribute(commentLine, objectType, refType, ¶m) + if err != nil { + return err + } + + item := spec.NewRefOrSpec(nil, &spec.Extendable[spec.Parameter]{ + Spec: ¶m, + }) + + o.Operation.Parameters = append(o.Operation.Parameters, item) + + return nil +} + +func (o *OperationV3) fillRequestBody(schema *spec.RefOrSpec[spec.Schema], required bool, description string, primitive, formData bool) { + if o.RequestBody == nil { + o.RequestBody = spec.NewRequestBodySpec() + o.RequestBody.Spec.Spec.Content = make(map[string]*spec.Extendable[spec.MediaType]) + + if primitive && !formData { + o.RequestBody.Spec.Spec.Content["text/plain"] = spec.NewMediaType() + } else if formData { + o.RequestBody.Spec.Spec.Content["application/x-www-form-urlencoded"] = spec.NewMediaType() + } else { + o.RequestBody.Spec.Spec.Content["application/json"] = spec.NewMediaType() + } + } + + o.RequestBody.Spec.Spec.Description = description + o.RequestBody.Spec.Spec.Required = required + + for _, value := range o.RequestBody.Spec.Spec.Content { + value.Spec.Schema = schema + } +} + +func (o *OperationV3) parseParamAttribute(comment, objectType, schemaType 
string, param *spec.Parameter) error { + if param == nil { + return fmt.Errorf("cannot parse empty parameter for comment: %s", comment) + } + + schemaType = TransToValidSchemeType(schemaType) + + for attrKey, re := range regexAttributes { + attr, err := findAttr(re, comment) + if err != nil { + continue + } + + switch attrKey { + case enumsTag: + err = setEnumParamV3(param.Schema.Spec, attr, objectType, schemaType) + case minimumTag, maximumTag: + err = setNumberParamV3(param.Schema.Spec, attrKey, schemaType, attr, comment) + case defaultTag: + err = setDefaultV3(param.Schema.Spec, schemaType, attr) + case minLengthTag, maxLengthTag: + err = setStringParamV3(param.Schema.Spec, attrKey, schemaType, attr, comment) + case formatTag: + param.Schema.Spec.Format = attr + case exampleTag: + val, err := defineType(schemaType, attr) + if err != nil { + continue // Don't set a example value if it's not valid + } + + param.Example = val + case schemaExampleTag: + err = setSchemaExampleV3(param.Schema.Spec, schemaType, attr) + case extensionsTag: + param.Schema.Spec.Extensions = setExtensionParam(attr) + case collectionFormatTag: + err = setCollectionFormatParamV3(param, attrKey, objectType, attr, comment) + } + + if err != nil { + return err + } + } + + return nil +} + +func (o *OperationV3) parseParamAttributeForBody(comment, objectType, schemaType string, param *spec.Schema) error { + schemaType = TransToValidSchemeType(schemaType) + + for attrKey, re := range regexAttributes { + attr, err := findAttr(re, comment) + if err != nil { + continue + } + + switch attrKey { + case enumsTag: + err = setEnumParamV3(param, attr, objectType, schemaType) + case minimumTag, maximumTag: + err = setNumberParamV3(param, attrKey, schemaType, attr, comment) + case defaultTag: + err = setDefaultV3(param, schemaType, attr) + case minLengthTag, maxLengthTag: + err = setStringParamV3(param, attrKey, schemaType, attr, comment) + case formatTag: + param.Format = attr + case exampleTag: + err = 
setSchemaExampleV3(param, schemaType, attr) + case schemaExampleTag: + err = setSchemaExampleV3(param, schemaType, attr) + case extensionsTag: + param.Extensions = setExtensionParam(attr) + } + + if err != nil { + return err + } + } + + return nil +} + +func setCollectionFormatParamV3(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType == ARRAY { + param.Style = TransToValidCollectionFormatV3(attr, param.In) + return nil + } + + return fmt.Errorf("%s is attribute to set to an array. comment=%s got=%s", name, commentLine, schemaType) +} + +func setSchemaExampleV3(param *spec.Schema, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a example value if it's not valid + } + + // skip schema + if param == nil { + return nil + } + + switch v := val.(type) { + case string: + // replaces \r \n \t in example string values. + param.Example = strings.NewReplacer(`\r`, "\r", `\n`, "\n", `\t`, "\t").Replace(v) + default: + param.Example = val + } + + return nil +} + +func setExampleParameterV3(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a example value if it's not valid + } + + param.Example = val + + return nil +} + +func setStringParamV3(param *spec.Schema, name, schemaType, attr, commentLine string) error { + if schemaType != STRING { + return fmt.Errorf("%s is attribute to set to a number. 
comment=%s got=%s", name, commentLine, schemaType) + } + + n, err := strconv.Atoi(attr) + if err != nil { + return fmt.Errorf("%s is allow only a number got=%s", name, attr) + } + + switch name { + case minLengthTag: + param.MinLength = &n + case maxLengthTag: + param.MaxLength = &n + } + + return nil +} + +func setDefaultV3(param *spec.Schema, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a default value if it's not valid + } + + param.Default = val + + return nil +} + +func setEnumParamV3(param *spec.Schema, attr, objectType, schemaType string) error { + for _, e := range strings.Split(attr, ",") { + e = strings.TrimSpace(e) + + value, err := defineType(schemaType, e) + if err != nil { + return err + } + + switch objectType { + case ARRAY: + param.Items.Schema.Spec.Enum = append(param.Items.Schema.Spec.Enum, value) + default: + param.Enum = append(param.Enum, value) + } + } + + return nil +} + +func setNumberParamV3(param *spec.Schema, name, schemaType, attr, commentLine string) error { + switch schemaType { + case INTEGER, NUMBER: + n, err := strconv.Atoi(attr) + if err != nil { + return fmt.Errorf("maximum is allow only a number. comment=%s got=%s", commentLine, attr) + } + + switch name { + case minimumTag: + param.Minimum = &n + case maximumTag: + param.Maximum = &n + } + + return nil + default: + return fmt.Errorf("%s is attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) + } +} + +func (o *OperationV3) parseAPIObjectSchema(commentLine, schemaType, refType string, astFile *ast.File) (*spec.RefOrSpec[spec.Schema], error) { + if strings.HasSuffix(refType, ",") && strings.Contains(refType, "[") { + // regexp may have broken generic syntax. 
find closing bracket and add it back + allMatchesLenOffset := strings.Index(commentLine, refType) + len(refType) + lostPartEndIdx := strings.Index(commentLine[allMatchesLenOffset:], "]") + if lostPartEndIdx >= 0 { + refType += commentLine[allMatchesLenOffset : allMatchesLenOffset+lostPartEndIdx+1] + } + } + + switch schemaType { + case OBJECT: + if !strings.HasPrefix(refType, "[]") { + return o.parseObjectSchema(refType, astFile) + } + + refType = refType[2:] + + fallthrough + case ARRAY: + schema, err := o.parseObjectSchema(refType, astFile) + if err != nil { + return nil, err + } + + result := spec.NewSchemaSpec() + result.Spec.Type = spec.NewSingleOrArray("array") + result.Spec.Items = spec.NewBoolOrSchema(false, schema) // TODO: allowed? + return result, nil + + default: + return PrimitiveSchemaV3(schemaType), nil + } +} + +// ParseRouterComment parses comment for given `router` comment string. +func (o *OperationV3) ParseRouterComment(commentLine string) error { + matches := routerPattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { + return fmt.Errorf("can not parse router comment \"%s\"", commentLine) + } + + signature := RouteProperties{ + Path: matches[1], + HTTPMethod: strings.ToUpper(matches[2]), + } + + if _, ok := allMethod[signature.HTTPMethod]; !ok { + return fmt.Errorf("invalid method: %s", signature.HTTPMethod) + } + + o.RouterProperties = append(o.RouterProperties, signature) + + return nil +} + +func (o *OperationV3) ParseServerURLComment(commentLine string) error { + server := spec.NewServer() + server.Spec.URL = commentLine + o.Servers = append(o.Servers, server) + return nil +} + +func (o *OperationV3) ParseServerDescriptionComment(commentLine string) error { + lastAddedServer := o.Servers[len(o.Servers)-1] + lastAddedServer.Spec.Description = commentLine + return nil +} + +// createParameter returns swagger spec.Parameter for given paramType, description, paramName, schemaType, required. 
+func createParameterV3(in, description, paramName, objectType, schemaType string, required bool, enums []interface{}, collectionFormat string) spec.Parameter { + // //five possible parameter types. query, path, body, header, form + result := spec.Parameter{ + Description: description, + Required: required, + Name: paramName, + In: in, + Schema: spec.NewRefOrSpec(nil, &spec.Schema{}), + } + + if in == "body" { + return result + } + + switch objectType { + case ARRAY: + result.Schema.Spec.Type = spec.NewSingleOrArray(objectType) + result.Schema.Spec.Items = spec.NewBoolOrSchema(false, spec.NewSchemaSpec()) + result.Schema.Spec.Items.Schema.Spec.Type = spec.NewSingleOrArray(schemaType) + result.Schema.Spec.Enum = enums + case PRIMITIVE, OBJECT: + result.Schema.Spec.Type = spec.NewSingleOrArray(schemaType) + result.Schema.Spec.Enum = enums + } + + return result +} + +func (o *OperationV3) parseObjectSchema(refType string, astFile *ast.File) (*spec.RefOrSpec[spec.Schema], error) { + return parseObjectSchemaV3(o.parser, refType, astFile) +} + +func parseObjectSchemaV3(parser *Parser, refType string, astFile *ast.File) (*spec.RefOrSpec[spec.Schema], error) { + switch { + case refType == NIL: + return nil, nil + case refType == INTERFACE: + return PrimitiveSchemaV3(OBJECT), nil + case refType == ANY: + return PrimitiveSchemaV3(OBJECT), nil + case IsGolangPrimitiveType(refType): + refType = TransToValidSchemeType(refType) + + return PrimitiveSchemaV3(refType), nil + case IsPrimitiveType(refType): + return PrimitiveSchemaV3(refType), nil + case strings.HasPrefix(refType, "[]"): + schema, err := parseObjectSchemaV3(parser, refType[2:], astFile) + if err != nil { + return nil, err + } + + result := spec.NewSchemaSpec() + result.Spec.Type = spec.NewSingleOrArray("array") + result.Spec.Items = spec.NewBoolOrSchema(false, schema) + + return result, nil + case strings.HasPrefix(refType, "map["): + // ignore key type + idx := strings.Index(refType, "]") + if idx < 0 { + return 
nil, fmt.Errorf("invalid type: %s", refType) + } + + refType = refType[idx+1:] + if refType == INTERFACE || refType == ANY { + schema := &spec.Schema{} + schema.AdditionalProperties = spec.NewBoolOrSchema(false, spec.NewSchemaSpec()) + schema.Type = spec.NewSingleOrArray(OBJECT) + refOrSpec := spec.NewRefOrSpec(nil, schema) + return refOrSpec, nil + } + + schema, err := parseObjectSchemaV3(parser, refType, astFile) + if err != nil { + return nil, err + } + + result := &spec.Schema{} + result.AdditionalProperties = spec.NewBoolOrSchema(false, schema) + result.Type = spec.NewSingleOrArray(OBJECT) + refOrSpec := spec.NewSchemaSpec() + refOrSpec.Spec = result + + return refOrSpec, nil + case strings.Contains(refType, "{"): + return parseCombinedObjectSchemaV3(parser, refType, astFile) + default: + if parser != nil { // checking refType has existing in 'TypeDefinitions' + schema, err := parser.getTypeSchemaV3(refType, astFile, true) + if err != nil { + return nil, err + } + + return schema, nil + } + + return spec.NewSchemaRef(spec.NewRef("#/components/schemas/" + refType)), nil + } +} + +// ParseResponseHeaderComment parses comment for given `response header` comment string. 
+func (o *OperationV3) ParseResponseHeaderComment(commentLine string, _ *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + header := newHeaderSpecV3(strings.Trim(matches[2], "{}"), strings.Trim(matches[4], "\"")) + + headerKey := strings.TrimSpace(matches[3]) + + if strings.EqualFold(matches[1], "all") { + if o.Responses.Spec.Default != nil { + o.Responses.Spec.Default.Spec.Spec.Headers[headerKey] = header + } + + if o.Responses.Spec.Response != nil { + for _, v := range o.Responses.Spec.Response { + v.Spec.Spec.Headers[headerKey] = header + + } + } + + return nil + } + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + if o.Responses.Spec.Default != nil { + o.Responses.Spec.Default.Spec.Spec.Headers[headerKey] = header + } + + continue + } + + _, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + // TODO check condition + if o.Responses != nil && o.Responses.Spec != nil && o.Responses.Spec.Response != nil { + response, responseExist := o.Responses.Spec.Response[codeStr] + if responseExist { + response.Spec.Spec.Headers[headerKey] = header + o.Responses.Spec.Response[codeStr] = response + } + } + } + + return nil +} + +func newHeaderSpecV3(schemaType, description string) *spec.RefOrSpec[spec.Extendable[spec.Header]] { + result := spec.NewHeaderSpec() + result.Spec.Spec.Description = description + result.Spec.Spec.Schema = spec.NewSchemaSpec() + result.Spec.Spec.Schema.Spec.Type = spec.NewSingleOrArray(schemaType) + + return result +} + +// ParseResponseComment parses comment for given `response` comment string. 
+func (o *OperationV3) ParseResponseComment(commentLine string, astFile *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { + err := o.ParseEmptyResponseComment(commentLine) + if err != nil { + return o.ParseEmptyResponseOnly(commentLine) + } + + return err + } + + description := strings.Trim(matches[4], "\"") + + schema, err := o.parseAPIObjectSchema(commentLine, strings.Trim(matches[2], "{}"), strings.TrimSpace(matches[3]), astFile) + if err != nil { + return err + } + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + codeStr = "" + } else { + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + if description == "" { + description = http.StatusText(code) + } + } + + response := spec.NewResponseSpec() + response.Spec.Spec.Description = description + + mimeType := "application/json" // TODO: set correct mimeType + setResponseSchema(response.Spec.Spec, mimeType, schema) + + o.AddResponse(codeStr, response) + } + + return nil +} + +// setResponseSchema sets response schema for given response. +func setResponseSchema(response *spec.Response, mimeType string, schema *spec.RefOrSpec[spec.Schema]) { + mediaType := spec.NewMediaType() + mediaType.Spec.Schema = schema + + if response.Content == nil { + response.Content = make(map[string]*spec.Extendable[spec.MediaType]) + } + + response.Content[mimeType] = mediaType +} + +// ParseEmptyResponseComment parse only comment out status code and description,eg: @Success 200 "it's ok". 
+func (o *OperationV3) ParseEmptyResponseComment(commentLine string) error { + matches := emptyResponsePattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + description := strings.Trim(matches[2], "\"") + + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + codeStr = "" + } else { + _, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + } + + o.AddResponse(codeStr, newResponseWithDescription(description)) + } + + return nil +} + +// AddResponse add a response for a code. +// If the code is already exist, it will merge with the old one: +// 1. The description will be replaced by the new one if the new one is not empty. +// 2. The content schema will be merged using `oneOf` if the new one is not empty. +func (o *OperationV3) AddResponse(code string, response *spec.RefOrSpec[spec.Extendable[spec.Response]]) { + if response.Spec.Spec.Headers == nil { + response.Spec.Spec.Headers = make(map[string]*spec.RefOrSpec[spec.Extendable[spec.Header]]) + } + + if o.Responses.Spec.Response == nil { + o.Responses.Spec.Response = make(map[string]*spec.RefOrSpec[spec.Extendable[spec.Response]]) + } + + res := response + var prev *spec.RefOrSpec[spec.Extendable[spec.Response]] + if code != "" { + prev = o.Responses.Spec.Response[code] + } else { + prev = o.Responses.Spec.Default + } + if prev != nil { // merge into prev + res = prev + if response.Spec.Spec.Description != "" { + prev.Spec.Spec.Description = response.Spec.Spec.Description + } + if len(response.Spec.Spec.Content) > 0 { + // responses should only have one content type + singleKey := "" + for k := range response.Spec.Spec.Content { + singleKey = k + break + } + if prevMediaType := prev.Spec.Spec.Content[singleKey]; prevMediaType == nil { + prev.Spec.Spec.Content = response.Spec.Spec.Content + } else { + 
newMediaType := response.Spec.Spec.Content[singleKey] + if len(newMediaType.Extensions) > 0 { + if prevMediaType.Extensions == nil { + prevMediaType.Extensions = make(map[string]interface{}) + } + for k, v := range newMediaType.Extensions { + prevMediaType.Extensions[k] = v + } + } + if len(newMediaType.Spec.Examples) > 0 { + if prevMediaType.Spec.Examples == nil { + prevMediaType.Spec.Examples = make(map[string]*spec.RefOrSpec[spec.Extendable[spec.Example]]) + } + for k, v := range newMediaType.Spec.Examples { + prevMediaType.Spec.Examples[k] = v + } + } + if prevSchema := prevMediaType.Spec.Schema; prevSchema.Ref != nil || prevSchema.Spec.OneOf == nil { + oneOfSchema := spec.NewSchemaSpec() + oneOfSchema.Spec.OneOf = []*spec.RefOrSpec[spec.Schema]{prevSchema, newMediaType.Spec.Schema} + prevMediaType.Spec.Schema = oneOfSchema + } else { + prevSchema.Spec.OneOf = append(prevSchema.Spec.OneOf, newMediaType.Spec.Schema) + } + } + } + } + + if code != "" { + o.Responses.Spec.Response[code] = res + } else { + o.Responses.Spec.Default = res + } +} + +// ParseEmptyResponseOnly parse only comment out status code ,eg: @Success 200. 
+func (o *OperationV3) ParseEmptyResponseOnly(commentLine string) error { + for _, codeStr := range strings.Split(commentLine, ",") { + var description string + if strings.EqualFold(codeStr, defaultTag) { + codeStr = "" + } else { + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + description = http.StatusText(code) + } + + o.AddResponse(codeStr, newResponseWithDescription(description)) + } + + return nil +} + +func newResponseWithDescription(description string) *spec.RefOrSpec[spec.Extendable[spec.Response]] { + response := spec.NewResponseSpec() + response.Spec.Spec.Description = description + return response +} + +func parseCombinedObjectSchemaV3(parser *Parser, refType string, astFile *ast.File) (*spec.RefOrSpec[spec.Schema], error) { + matches := combinedPattern.FindStringSubmatch(refType) + if len(matches) != 3 { + return nil, fmt.Errorf("invalid type: %s", refType) + } + + schema, err := parseObjectSchemaV3(parser, matches[1], astFile) + if err != nil { + return nil, err + } + + fields, props := parseFields(matches[2]), map[string]*spec.RefOrSpec[spec.Schema]{} + + for _, field := range fields { + keyVal := strings.SplitN(field, "=", 2) + if len(keyVal) != 2 { + continue + } + + schema, err := parseObjectSchemaV3(parser, keyVal[1], astFile) + if err != nil { + return nil, err + } + + props[keyVal[0]] = schema + } + + if len(props) == 0 { + return schema, nil + } + + if schema.Ref == nil && + len(schema.Spec.Type) > 0 && + schema.Spec.Type[0] == OBJECT && + len(schema.Spec.Properties) == 0 && + schema.Spec.AdditionalProperties == nil { + schema.Spec.Properties = props + return schema, nil + } + + schemaRefPath := strings.Replace(schema.Ref.Ref, "#/components/schemas/", "", 1) + schemaSpec := parser.openAPI.Components.Spec.Schemas[schemaRefPath] + schemaSpec.Spec.JsonSchemaComposition.AllOf = make([]*spec.RefOrSpec[spec.Schema], len(props)) + + i := 0 + for name, prop := range 
props { + wrapperSpec := spec.NewSchemaSpec() + wrapperSpec.Spec = &spec.Schema{} + wrapperSpec.Spec.Type = spec.NewSingleOrArray(OBJECT) + wrapperSpec.Spec.Properties = map[string]*spec.RefOrSpec[spec.Schema]{ + name: prop, + } + + parser.openAPI.Components.Spec.Schemas[name] = wrapperSpec + + ref := spec.NewRefOrSpec[spec.Schema](spec.NewRef("#/components/schemas/"+name), nil) + + schemaSpec.Spec.JsonSchemaComposition.AllOf[i] = ref + i++ + } + + return schemaSpec, nil +} + +// ParseSecurityComment parses comment for given `security` comment string. +func (o *OperationV3) ParseSecurityComment(commentLine string) error { + var ( + securityMap = make(map[string][]string) + securitySource = commentLine[strings.Index(commentLine, "@Security")+1:] + ) + + for _, securityOption := range strings.Split(securitySource, "||") { + securityOption = strings.TrimSpace(securityOption) + + left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") + + if !(left == -1 && right == -1) { + scopes := securityOption[left+1 : right] + + var options []string + + for _, scope := range strings.Split(scopes, ",") { + options = append(options, strings.TrimSpace(scope)) + } + + securityKey := securityOption[0:left] + securityMap[securityKey] = append(securityMap[securityKey], options...) + } else { + securityKey := strings.TrimSpace(securityOption) + securityMap[securityKey] = []string{} + } + } + + o.Security = append(o.Security, securityMap) + + return nil +} + +// ParseCodeSample godoc. 
+func (o *OperationV3) ParseCodeSample(attribute, _, lineRemainder string) error { + log.Println("line remainder:", lineRemainder) + + if lineRemainder == "file" { + log.Println("line remainder is file") + + data, isJSON, err := getCodeExampleForSummary(o.Summary, o.codeExampleFilesDir) + if err != nil { + return err + } + + // using custom type, as json marshaller has problems with []map[interface{}]map[interface{}]interface{} + var valueJSON CodeSamples + + if isJSON { + err = json.Unmarshal(data, &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value. error: %s", attribute, err.Error()) + } + } else { + err = yaml.Unmarshal(data, &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid yaml value. error: %s", attribute, err.Error()) + } + } + + o.Responses.Extensions[attribute[1:]] = valueJSON + + return nil + } + + // Fallback into existing logic + return o.ParseMetadata(attribute, strings.ToLower(attribute), lineRemainder) +} diff --git a/vendor/github.com/swaggo/swag/v2/package.go b/vendor/github.com/swaggo/swag/v2/package.go new file mode 100644 index 00000000..487da300 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/package.go @@ -0,0 +1,197 @@ +package swag + +import ( + "go/ast" + "go/token" + "reflect" + "strconv" + "strings" +) + +// PackageDefinitions files and definition in a package. 
+type PackageDefinitions struct { + // files in this package, map key is file's relative path starting package path + Files map[string]*ast.File + + // definitions in this package, map key is typeName + TypeDefinitions map[string]*TypeSpecDef + + // const variables in this package, map key is the name + ConstTable map[string]*ConstVariable + + // const variables in order in this package + OrderedConst []*ConstVariable + + // package name + Name string + + // package path + Path string +} + +// ConstVariableGlobalEvaluator an interface used to evaluate enums across packages +type ConstVariableGlobalEvaluator interface { + EvaluateConstValue(pkg *PackageDefinitions, cv *ConstVariable, recursiveStack map[string]struct{}) (interface{}, ast.Expr) + EvaluateConstValueByName(file *ast.File, pkgPath, constVariableName string, recursiveStack map[string]struct{}) (interface{}, ast.Expr) + FindTypeSpec(typeName string, file *ast.File) *TypeSpecDef +} + +// NewPackageDefinitions new a PackageDefinitions object +func NewPackageDefinitions(name, pkgPath string) *PackageDefinitions { + return &PackageDefinitions{ + Name: name, + Path: pkgPath, + Files: make(map[string]*ast.File), + TypeDefinitions: make(map[string]*TypeSpecDef), + ConstTable: make(map[string]*ConstVariable), + } +} + +// AddFile add a file +func (pkg *PackageDefinitions) AddFile(pkgPath string, file *ast.File) *PackageDefinitions { + pkg.Files[pkgPath] = file + return pkg +} + +// AddTypeSpec add a type spec. +func (pkg *PackageDefinitions) AddTypeSpec(name string, typeSpec *TypeSpecDef) *PackageDefinitions { + pkg.TypeDefinitions[name] = typeSpec + return pkg +} + +// AddConst add a const variable. 
+func (pkg *PackageDefinitions) AddConst(astFile *ast.File, valueSpec *ast.ValueSpec) *PackageDefinitions { + for i := 0; i < len(valueSpec.Names) && i < len(valueSpec.Values); i++ { + variable := &ConstVariable{ + Name: valueSpec.Names[i], + Type: valueSpec.Type, + Value: valueSpec.Values[i], + Comment: valueSpec.Comment, + File: astFile, + } + pkg.ConstTable[valueSpec.Names[i].Name] = variable + pkg.OrderedConst = append(pkg.OrderedConst, variable) + } + return pkg +} + +func (pkg *PackageDefinitions) evaluateConstValue(file *ast.File, iota int, expr ast.Expr, globalEvaluator ConstVariableGlobalEvaluator, recursiveStack map[string]struct{}) (interface{}, ast.Expr) { + switch valueExpr := expr.(type) { + case *ast.Ident: + if valueExpr.Name == "iota" { + return iota, nil + } + if pkg.ConstTable != nil { + if cv, ok := pkg.ConstTable[valueExpr.Name]; ok { + return globalEvaluator.EvaluateConstValue(pkg, cv, recursiveStack) + } + } + case *ast.SelectorExpr: + pkgIdent, ok := valueExpr.X.(*ast.Ident) + if !ok { + return nil, nil + } + return globalEvaluator.EvaluateConstValueByName(file, pkgIdent.Name, valueExpr.Sel.Name, recursiveStack) + case *ast.BasicLit: + switch valueExpr.Kind { + case token.INT: + // handle underscored number, such as 1_000_000 + if strings.ContainsRune(valueExpr.Value, '_') { + valueExpr.Value = strings.Replace(valueExpr.Value, "_", "", -1) + } + // hexadecimal + if len(valueExpr.Value) > 2 && valueExpr.Value[0] == '0' && valueExpr.Value[1] == 'x' { + if x, err := strconv.ParseInt(valueExpr.Value[2:], 16, 64); err == nil { + return int(x), nil + } else if x, err := strconv.ParseUint(valueExpr.Value[2:], 16, 64); err == nil { + return x, nil + } else { + panic(err) + } + } + if len(valueExpr.Value) >= 2 && valueExpr.Value[0] == '0' { + var start, base = 2, 8 + switch valueExpr.Value[1] { + case 'x', 'X': + //hex + base = 16 + case 'b', 'B': + //binary + base = 2 + default: + //octet + start = 1 + } + if x, err := 
strconv.ParseInt(valueExpr.Value[start:], base, 64); err == nil { + return int(x), nil + } else if x, err := strconv.ParseUint(valueExpr.Value[start:], base, 64); err == nil { + return x, nil + } else { + panic(err) + } + } + + //a basic literal integer is int type in default, or must have an explicit converting type in front + if x, err := strconv.ParseInt(valueExpr.Value, 10, 64); err == nil { + return int(x), nil + } else if x, err := strconv.ParseUint(valueExpr.Value, 10, 64); err == nil { + return x, nil + } else { + panic(err) + } + case token.STRING: + if valueExpr.Value[0] == '`' { + return valueExpr.Value[1 : len(valueExpr.Value)-1], nil + } + return EvaluateEscapedString(valueExpr.Value[1 : len(valueExpr.Value)-1]), nil + case token.CHAR: + return EvaluateEscapedChar(valueExpr.Value[1 : len(valueExpr.Value)-1]), nil + } + case *ast.UnaryExpr: + x, evalType := pkg.evaluateConstValue(file, iota, valueExpr.X, globalEvaluator, recursiveStack) + if x == nil { + return x, evalType + } + return EvaluateUnary(x, valueExpr.Op, evalType) + case *ast.BinaryExpr: + x, evalTypex := pkg.evaluateConstValue(file, iota, valueExpr.X, globalEvaluator, recursiveStack) + y, evalTypey := pkg.evaluateConstValue(file, iota, valueExpr.Y, globalEvaluator, recursiveStack) + if x == nil || y == nil { + return nil, nil + } + return EvaluateBinary(x, y, valueExpr.Op, evalTypex, evalTypey) + case *ast.ParenExpr: + return pkg.evaluateConstValue(file, iota, valueExpr.X, globalEvaluator, recursiveStack) + case *ast.CallExpr: + //data conversion + if len(valueExpr.Args) != 1 { + return nil, nil + } + arg := valueExpr.Args[0] + if ident, ok := valueExpr.Fun.(*ast.Ident); ok { + name := ident.Name + if name == "uintptr" { + name = "uint" + } + value, _ := pkg.evaluateConstValue(file, iota, arg, globalEvaluator, recursiveStack) + if IsGolangPrimitiveType(name) { + value = EvaluateDataConversion(value, name) + return value, nil + } else if name == "len" { + return reflect.ValueOf(value).Len(), 
nil + } + typeDef := globalEvaluator.FindTypeSpec(name, file) + if typeDef == nil { + return nil, nil + } + return value, valueExpr.Fun + } else if selector, ok := valueExpr.Fun.(*ast.SelectorExpr); ok { + typeDef := globalEvaluator.FindTypeSpec(fullTypeName(selector.X.(*ast.Ident).Name, selector.Sel.Name), file) + if typeDef == nil { + return nil, nil + } + return arg, typeDef.TypeSpec.Type + } + } + return nil, nil +} diff --git a/vendor/github.com/swaggo/swag/v2/packages.go b/vendor/github.com/swaggo/swag/v2/packages.go new file mode 100644 index 00000000..2f157b00 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/packages.go @@ -0,0 +1,652 @@ +package swag + +import ( + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "golang.org/x/tools/go/loader" +) + +// PackagesDefinitions map[package import path]*PackageDefinitions. +type PackagesDefinitions struct { + files map[*ast.File]*AstFileInfo + packages map[string]*PackageDefinitions + uniqueDefinitions map[string]*TypeSpecDef + parseDependency ParseFlag + debug Debugger +} + +// NewPackagesDefinitions create object PackagesDefinitions. +func NewPackagesDefinitions() *PackagesDefinitions { + return &PackagesDefinitions{ + files: make(map[*ast.File]*AstFileInfo), + packages: make(map[string]*PackageDefinitions), + uniqueDefinitions: make(map[string]*TypeSpecDef), + } +} + +// ParseFile parse a source file. +func (pkgDefs *PackagesDefinitions) ParseFile(packageDir, path string, src interface{}, flag ParseFlag) error { + // positions are relative to FileSet + fileSet := token.NewFileSet() + astFile, err := goparser.ParseFile(fileSet, path, src, goparser.ParseComments) + if err != nil { + return fmt.Errorf("failed to parse file %s, error:%+v", path, err) + } + return pkgDefs.collectAstFile(fileSet, packageDir, path, astFile, flag) +} + +// collectAstFile collect ast.file. 
+func (pkgDefs *PackagesDefinitions) collectAstFile(fileSet *token.FileSet, packageDir, path string, astFile *ast.File, flag ParseFlag) error { + if pkgDefs.files == nil { + pkgDefs.files = make(map[*ast.File]*AstFileInfo) + } + + if pkgDefs.packages == nil { + pkgDefs.packages = make(map[string]*PackageDefinitions) + } + + // return without storing the file if we lack a packageDir + if packageDir == "" { + return nil + } + + path, err := filepath.Abs(path) + if err != nil { + return err + } + + dependency, ok := pkgDefs.packages[packageDir] + if ok { + // return without storing the file if it already exists + _, exists := dependency.Files[path] + if exists { + return nil + } + + dependency.Files[path] = astFile + } else { + pkgDefs.packages[packageDir] = NewPackageDefinitions(astFile.Name.Name, packageDir).AddFile(path, astFile) + } + + pkgDefs.files[astFile] = &AstFileInfo{ + FileSet: fileSet, + File: astFile, + Path: path, + PackagePath: packageDir, + ParseFlag: flag, + } + + return nil +} + +// RangeFiles for range the collection of ast.File in alphabetic order. +func (pkgDefs *PackagesDefinitions) RangeFiles(handle func(info *AstFileInfo) error) error { + sortedFiles := make([]*AstFileInfo, 0, len(pkgDefs.files)) + for _, info := range pkgDefs.files { + // ignore package path prefix with 'vendor' or $GOROOT, + // because the router info of api will not be included these files. + if strings.HasPrefix(info.PackagePath, "vendor") || (runtime.GOROOT() != "" && strings.HasPrefix(info.Path, runtime.GOROOT()+string(filepath.Separator))) { + continue + } + sortedFiles = append(sortedFiles, info) + } + + sort.Slice(sortedFiles, func(i, j int) bool { + return strings.Compare(sortedFiles[i].Path, sortedFiles[j].Path) < 0 + }) + + for _, info := range sortedFiles { + err := handle(info) + if err != nil { + return err + } + } + + return nil +} + +// ParseTypes parse types +// @Return parsed definitions. 
+func (pkgDefs *PackagesDefinitions) ParseTypes() (map[*TypeSpecDef]*Schema, error) { + parsedSchemas := make(map[*TypeSpecDef]*Schema) + for astFile, info := range pkgDefs.files { + pkgDefs.parseTypesFromFile(astFile, info.PackagePath, parsedSchemas) + pkgDefs.parseFunctionScopedTypesFromFile(astFile, info.PackagePath, parsedSchemas) + } + pkgDefs.removeAllNotUniqueTypes() + pkgDefs.evaluateAllConstVariables() + pkgDefs.collectConstEnums(parsedSchemas) + return parsedSchemas, nil +} + +func (pkgDefs *PackagesDefinitions) parseTypesFromFile(astFile *ast.File, packagePath string, parsedSchemas map[*TypeSpecDef]*Schema) { + for _, astDeclaration := range astFile.Decls { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if !ok { + continue + } + if generalDeclaration.Tok == token.TYPE { + for _, astSpec := range generalDeclaration.Specs { + if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { + typeSpecDef := &TypeSpecDef{ + PkgPath: packagePath, + File: astFile, + TypeSpec: typeSpec, + } + + if idt, ok := typeSpec.Type.(*ast.Ident); ok && IsGolangPrimitiveType(idt.Name) && parsedSchemas != nil { + parsedSchemas[typeSpecDef] = &Schema{ + PkgPath: typeSpecDef.PkgPath, + Name: astFile.Name.Name, + Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + } + } + + if pkgDefs.uniqueDefinitions == nil { + pkgDefs.uniqueDefinitions = make(map[string]*TypeSpecDef) + } + + fullName := typeSpecDef.TypeName() + + anotherTypeDef, ok := pkgDefs.uniqueDefinitions[fullName] + if ok { + if anotherTypeDef == nil { + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } else if typeSpecDef.PkgPath != anotherTypeDef.PkgPath { + pkgDefs.uniqueDefinitions[fullName] = nil + anotherTypeDef.NotUnique = true + pkgDefs.uniqueDefinitions[anotherTypeDef.TypeName()] = anotherTypeDef + anotherTypeDef.SetSchemaName() + + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] 
= typeSpecDef + } + } else { + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } + + typeSpecDef.SetSchemaName() + + if pkgDefs.packages[typeSpecDef.PkgPath] == nil { + pkgDefs.packages[typeSpecDef.PkgPath] = NewPackageDefinitions(astFile.Name.Name, typeSpecDef.PkgPath).AddTypeSpec(typeSpecDef.Name(), typeSpecDef) + } else if _, ok = pkgDefs.packages[typeSpecDef.PkgPath].TypeDefinitions[typeSpecDef.Name()]; !ok { + pkgDefs.packages[typeSpecDef.PkgPath].AddTypeSpec(typeSpecDef.Name(), typeSpecDef) + } + } + } + } else if generalDeclaration.Tok == token.CONST { + // collect consts + pkgDefs.collectConstVariables(astFile, packagePath, generalDeclaration) + } + } +} + +func (pkgDefs *PackagesDefinitions) parseFunctionScopedTypesFromFile(astFile *ast.File, packagePath string, parsedSchemas map[*TypeSpecDef]*Schema) { + for _, astDeclaration := range astFile.Decls { + funcDeclaration, ok := astDeclaration.(*ast.FuncDecl) + if ok && funcDeclaration.Body != nil { + functionScopedTypes := make(map[string]*TypeSpecDef) + for _, stmt := range funcDeclaration.Body.List { + if declStmt, ok := (stmt).(*ast.DeclStmt); ok { + if genDecl, ok := (declStmt.Decl).(*ast.GenDecl); ok && genDecl.Tok == token.TYPE { + for _, astSpec := range genDecl.Specs { + if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { + typeSpecDef := &TypeSpecDef{ + PkgPath: packagePath, + File: astFile, + TypeSpec: typeSpec, + ParentSpec: astDeclaration, + } + + if idt, ok := typeSpec.Type.(*ast.Ident); ok && IsGolangPrimitiveType(idt.Name) && parsedSchemas != nil { + parsedSchemas[typeSpecDef] = &Schema{ + PkgPath: typeSpecDef.PkgPath, + Name: astFile.Name.Name, + Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + } + } + + fullName := typeSpecDef.TypeName() + if structType, ok := typeSpecDef.TypeSpec.Type.(*ast.StructType); ok { + for _, field := range structType.Fields.List { + var idt *ast.Ident + var ok bool + switch field.Type.(type) { + case *ast.Ident: + idt, ok = field.Type.(*ast.Ident) + 
case *ast.StarExpr: + idt, ok = field.Type.(*ast.StarExpr).X.(*ast.Ident) + case *ast.ArrayType: + idt, ok = field.Type.(*ast.ArrayType).Elt.(*ast.Ident) + } + if ok && !IsGolangPrimitiveType(idt.Name) { + if functype, ok := functionScopedTypes[idt.Name]; ok { + idt.Name = functype.TypeName() + } + } + } + } + + if pkgDefs.uniqueDefinitions == nil { + pkgDefs.uniqueDefinitions = make(map[string]*TypeSpecDef) + } + + anotherTypeDef, ok := pkgDefs.uniqueDefinitions[fullName] + if ok { + if anotherTypeDef == nil { + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } else if typeSpecDef.PkgPath != anotherTypeDef.PkgPath { + pkgDefs.uniqueDefinitions[fullName] = nil + anotherTypeDef.NotUnique = true + pkgDefs.uniqueDefinitions[anotherTypeDef.TypeName()] = anotherTypeDef + anotherTypeDef.SetSchemaName() + + typeSpecDef.NotUnique = true + fullName = typeSpecDef.TypeName() + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + } + } else { + pkgDefs.uniqueDefinitions[fullName] = typeSpecDef + functionScopedTypes[typeSpec.Name.Name] = typeSpecDef + } + + typeSpecDef.SetSchemaName() + + if pkgDefs.packages[typeSpecDef.PkgPath] == nil { + pkgDefs.packages[typeSpecDef.PkgPath] = NewPackageDefinitions(astFile.Name.Name, typeSpecDef.PkgPath).AddTypeSpec(fullName, typeSpecDef) + } else if _, ok = pkgDefs.packages[typeSpecDef.PkgPath].TypeDefinitions[fullName]; !ok { + pkgDefs.packages[typeSpecDef.PkgPath].AddTypeSpec(fullName, typeSpecDef) + } + } + } + + } + + } + } + } + } +} + +func (pkgDefs *PackagesDefinitions) collectConstVariables(astFile *ast.File, packagePath string, generalDeclaration *ast.GenDecl) { + pkg, ok := pkgDefs.packages[packagePath] + if !ok { + pkg = NewPackageDefinitions(astFile.Name.Name, packagePath) + pkgDefs.packages[packagePath] = pkg + } + + var lastValueSpec *ast.ValueSpec + for _, astSpec := range generalDeclaration.Specs { + valueSpec, ok := astSpec.(*ast.ValueSpec) + if !ok { + 
continue + } + if len(valueSpec.Names) == 1 && len(valueSpec.Values) == 1 { + lastValueSpec = valueSpec + } else if len(valueSpec.Names) == 1 && len(valueSpec.Values) == 0 && valueSpec.Type == nil && lastValueSpec != nil { + valueSpec.Type = lastValueSpec.Type + valueSpec.Values = lastValueSpec.Values + } + pkg.AddConst(astFile, valueSpec) + } +} + +func (pkgDefs *PackagesDefinitions) evaluateAllConstVariables() { + for _, pkg := range pkgDefs.packages { + for _, constVar := range pkg.OrderedConst { + pkgDefs.EvaluateConstValue(pkg, constVar, nil) + } + } +} + +// EvaluateConstValue evaluate a const variable. +func (pkgDefs *PackagesDefinitions) EvaluateConstValue(pkg *PackageDefinitions, cv *ConstVariable, recursiveStack map[string]struct{}) (interface{}, ast.Expr) { + if expr, ok := cv.Value.(ast.Expr); ok { + defer func() { + if err := recover(); err != nil { + if fi, ok := pkgDefs.files[cv.File]; ok { + pos := fi.FileSet.Position(cv.Name.NamePos) + pkgDefs.debug.Printf("warning: failed to evaluate const %s at %s:%d:%d, %v", cv.Name.Name, fi.Path, pos.Line, pos.Column, err) + } + } + }() + if recursiveStack == nil { + recursiveStack = make(map[string]struct{}) + } + fullConstName := fullTypeName(pkg.Path, cv.Name.Name) + if _, ok = recursiveStack[fullConstName]; ok { + return nil, nil + } + recursiveStack[fullConstName] = struct{}{} + + value, evalType := pkg.evaluateConstValue(cv.File, cv.Name.Obj.Data.(int), expr, pkgDefs, recursiveStack) + if cv.Type == nil && evalType != nil { + cv.Type = evalType + } + if value != nil { + cv.Value = value + } + return value, cv.Type + } + return cv.Value, cv.Type +} + +// EvaluateConstValueByName evaluate a const variable by name. 
+func (pkgDefs *PackagesDefinitions) EvaluateConstValueByName(file *ast.File, pkgName, constVariableName string, recursiveStack map[string]struct{}) (interface{}, ast.Expr) { + matchedPkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports(pkgName, file) + for _, pkgPath := range matchedPkgPaths { + if pkg, ok := pkgDefs.packages[pkgPath]; ok { + if cv, ok := pkg.ConstTable[constVariableName]; ok { + return pkgDefs.EvaluateConstValue(pkg, cv, recursiveStack) + } + } + } + if pkgDefs.parseDependency > 0 { + for _, pkgPath := range externalPkgPaths { + if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { + if pkg, ok := pkgDefs.packages[pkgPath]; ok { + if cv, ok := pkg.ConstTable[constVariableName]; ok { + return pkgDefs.EvaluateConstValue(pkg, cv, recursiveStack) + } + } + } + } + } + return nil, nil +} + +func (pkgDefs *PackagesDefinitions) collectConstEnums(parsedSchemas map[*TypeSpecDef]*Schema) { + for _, pkg := range pkgDefs.packages { + for _, constVar := range pkg.OrderedConst { + if constVar.Type == nil { + continue + } + ident, ok := constVar.Type.(*ast.Ident) + if !ok || IsGolangPrimitiveType(ident.Name) { + continue + } + typeDef, ok := pkg.TypeDefinitions[ident.Name] + if !ok { + continue + } + + // delete it from parsed schemas, and will parse it again + if _, ok = parsedSchemas[typeDef]; ok { + delete(parsedSchemas, typeDef) + } + + if typeDef.Enums == nil { + typeDef.Enums = make([]EnumValue, 0) + } + + name := constVar.Name.Name + if _, ok = constVar.Value.(ast.Expr); ok { + continue + } + + enumValue := EnumValue{ + key: name, + Value: constVar.Value, + } + if constVar.Comment != nil && len(constVar.Comment.List) > 0 { + enumValue.Comment = constVar.Comment.List[0].Text + enumValue.Comment = strings.TrimPrefix(enumValue.Comment, "//") + enumValue.Comment = strings.TrimPrefix(enumValue.Comment, "/*") + enumValue.Comment = strings.TrimSuffix(enumValue.Comment, "*/") + enumValue.Comment = strings.TrimSpace(enumValue.Comment) + } + 
typeDef.Enums = append(typeDef.Enums, enumValue) + } + } +} + +func (pkgDefs *PackagesDefinitions) removeAllNotUniqueTypes() { + for key, ud := range pkgDefs.uniqueDefinitions { + if ud == nil { + delete(pkgDefs.uniqueDefinitions, key) + } + } +} + +func (pkgDefs *PackagesDefinitions) findTypeSpec(pkgPath string, typeName string) *TypeSpecDef { + if pkgDefs.packages == nil { + return nil + } + + pd, found := pkgDefs.packages[pkgPath] + if found { + typeSpec, ok := pd.TypeDefinitions[typeName] + if ok { + return typeSpec + } + } + + return nil +} + +func (pkgDefs *PackagesDefinitions) loadExternalPackage(importPath string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + conf := loader.Config{ + ParserMode: goparser.ParseComments, + Cwd: cwd, + } + + conf.Import(importPath) + + loaderProgram, err := conf.Load() + if err != nil { + return err + } + + for _, info := range loaderProgram.AllPackages { + pkgPath := strings.TrimPrefix(info.Pkg.Path(), "vendor/") + for _, astFile := range info.Files { + pkgDefs.parseTypesFromFile(astFile, pkgPath, nil) + } + } + + return nil +} + +// findPackagePathFromImports finds out the package path of a package via ranging imports of an ast.File +// @pkg the name of the target package +// @file current ast.File in which to search imports +// @return the package paths of a package of @pkg. 
+func (pkgDefs *PackagesDefinitions) findPackagePathFromImports(pkg string, file *ast.File) (matchedPkgPaths, externalPkgPaths []string) { + if file == nil { + return + } + + if strings.ContainsRune(pkg, '.') { + pkg = strings.Split(pkg, ".")[0] + } + + matchLastPathPart := func(pkgPath string) bool { + paths := strings.Split(pkgPath, "/") + return paths[len(paths)-1] == pkg + } + + // prior to match named package + for _, imp := range file.Imports { + path := strings.Trim(imp.Path.Value, `"`) + if imp.Name != nil { + if imp.Name.Name == pkg { + // if name match, break loop and return + _, ok := pkgDefs.packages[path] + if ok { + matchedPkgPaths = []string{path} + externalPkgPaths = nil + } else { + externalPkgPaths = []string{path} + matchedPkgPaths = nil + } + break + } else if imp.Name.Name == "_" && len(pkg) > 0 { + // for unused types + pd, ok := pkgDefs.packages[path] + if ok { + if pd.Name == pkg { + matchedPkgPaths = append(matchedPkgPaths, path) + } + } else if matchLastPathPart(path) { + externalPkgPaths = append(externalPkgPaths, path) + } + } else if imp.Name.Name == "." 
&& len(pkg) == 0 { + _, ok := pkgDefs.packages[path] + if ok { + matchedPkgPaths = append(matchedPkgPaths, path) + } else if len(pkg) == 0 || matchLastPathPart(path) { + externalPkgPaths = append(externalPkgPaths, path) + } + } + } else if pkgDefs.packages != nil && len(pkg) > 0 { + pd, ok := pkgDefs.packages[path] + if ok { + if pd.Name == pkg { + matchedPkgPaths = append(matchedPkgPaths, path) + } + } else if matchLastPathPart(path) { + externalPkgPaths = append(externalPkgPaths, path) + } + } + } + + if len(pkg) == 0 || file.Name.Name == pkg { + matchedPkgPaths = append(matchedPkgPaths, pkgDefs.files[file].PackagePath) + } + + return +} + +func (pkgDefs *PackagesDefinitions) findTypeSpecFromPackagePaths(matchedPkgPaths, externalPkgPaths []string, name string) (typeDef *TypeSpecDef) { + if pkgDefs.parseDependency > 0 { + for _, pkgPath := range externalPkgPaths { + if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { + typeDef = pkgDefs.findTypeSpec(pkgPath, name) + if typeDef != nil { + return typeDef + } + } + } + } + + for _, pkgPath := range matchedPkgPaths { + typeDef = pkgDefs.findTypeSpec(pkgPath, name) + if typeDef != nil { + return typeDef + } + } + + return typeDef +} + +// FindTypeSpec finds out TypeSpecDef of a type by typeName +// @typeName the name of the target type, if it starts with a package name, find its own package path from imports on top of @file +// @file the ast.file in which @typeName is used +// @pkgPath the package path of @file. 
+func (pkgDefs *PackagesDefinitions) FindTypeSpec(typeName string, file *ast.File) *TypeSpecDef { + if IsGolangPrimitiveType(typeName) { + return nil + } + + if file == nil { // for test + return pkgDefs.uniqueDefinitions[typeName] + } + + parts := strings.Split(strings.Split(typeName, "[")[0], ".") + if len(parts) > 1 { + pkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports(parts[0], file) + if len(externalPkgPaths) == 0 || pkgDefs.parseDependency == ParseNone { + typeDef, ok := pkgDefs.uniqueDefinitions[typeName] + if ok { + return typeDef + } + } + typeDef := pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, parts[1]) + /* + TODO : remove + if len(pkgPaths) == 0 && len(externalPkgPaths) == 0 { + pkgDefinition := pkgDefs.packages["pkg/"+parts[0]] + if pkgDefinition == nil { + return pkgDefs.findTypeSpec("", parts[1]) + } + + typeDef = pkgDefinition.TypeDefinitions[parts[1]] + } else { + typeDef = pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, parts[1]) + } + + */ + return pkgDefs.parametrizeGenericType(file, typeDef, typeName) + } + + typeDef, ok := pkgDefs.uniqueDefinitions[fullTypeName(file.Name.Name, typeName)] + if ok { + return typeDef + } + + name := parts[0] + typeDef, ok = pkgDefs.uniqueDefinitions[fullTypeName(file.Name.Name, name)] + if !ok { + pkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports("", file) + + if len(pkgPaths) == 0 { + pkgDefinition := pkgDefs.packages["pkg/"+parts[0]] + if pkgDefinition == nil { + return pkgDefs.findTypeSpec("", parts[1]) + } + + typeDef = pkgDefinition.TypeDefinitions[parts[0]] + } else { + typeDef = pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, name) + } + } + + if typeDef != nil { + return pkgDefs.parametrizeGenericType(file, typeDef, typeName) + } + + // in case that comment //@name renamed the type with a name without a dot + for k, v := range pkgDefs.uniqueDefinitions { + if v == nil { + pkgDefs.debug.Printf("%s TypeSpecDef is nil", k) + 
continue + } + if v.SchemaName == typeName { + return v + } + } + + return nil +} + +func isAliasPkgName(file *ast.File, pkgName string) bool { + if file == nil && file.Imports == nil { + return false + } + + for _, pkg := range file.Imports { + if pkg.Name != nil && pkg.Name.Name == pkgName { + return true + } + } + + return false +} diff --git a/vendor/github.com/swaggo/swag/v2/parser.go b/vendor/github.com/swaggo/swag/v2/parser.go new file mode 100644 index 00000000..7a71f102 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/parser.go @@ -0,0 +1,2049 @@ +package swag + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/build" + goparser "go/parser" + "go/token" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" + + "github.com/KyleBanks/depth" + "github.com/go-openapi/spec" + openapi "github.com/sv-tools/openapi/spec" +) + +const ( + // CamelCase indicates using CamelCase strategy for struct field. + CamelCase = "camelcase" + + // PascalCase indicates using PascalCase strategy for struct field. + PascalCase = "pascalcase" + + // SnakeCase indicates using SnakeCase strategy for struct field. 
+ SnakeCase = "snakecase" + + idAttr = "@id" + acceptAttr = "@accept" + produceAttr = "@produce" + paramAttr = "@param" + successAttr = "@success" + failureAttr = "@failure" + responseAttr = "@response" + headerAttr = "@header" + tagsAttr = "@tags" + routerAttr = "@router" + deprecatedRouterAttr = "@deprecatedrouter" + + summaryAttr = "@summary" + deprecatedAttr = "@deprecated" + securityAttr = "@security" + titleAttr = "@title" + conNameAttr = "@contact.name" + conURLAttr = "@contact.url" + conEmailAttr = "@contact.email" + licNameAttr = "@license.name" + licURLAttr = "@license.url" + versionAttr = "@version" + descriptionAttr = "@description" + descriptionMarkdownAttr = "@description.markdown" + secBasicAttr = "@securitydefinitions.basic" + secAPIKeyAttr = "@securitydefinitions.apikey" + secBearerAuthAttr = "@securitydefinitions.bearerauth" + secApplicationAttr = "@securitydefinitions.oauth2.application" + secImplicitAttr = "@securitydefinitions.oauth2.implicit" + secPasswordAttr = "@securitydefinitions.oauth2.password" + secAccessCodeAttr = "@securitydefinitions.oauth2.accesscode" + tosAttr = "@termsofservice" + extDocsDescAttr = "@externaldocs.description" + extDocsURLAttr = "@externaldocs.url" + xCodeSamplesAttr = "@x-codesamples" + xCodeSamplesAttrOriginal = "@x-codeSamples" + scopeAttrPrefix = "@scope." + stateAttr = "@state" +) + +// ParseFlag determine what to parse +type ParseFlag int + +const ( + // ParseNone parse nothing + ParseNone ParseFlag = 0x00 + // ParseModels parse models + ParseModels = 0x01 + // ParseOperations parse operations + ParseOperations = 0x02 + // ParseAll parse operations and models + ParseAll = ParseOperations | ParseModels +) + +var ( + // ErrRecursiveParseStruct recursively parsing struct. + ErrRecursiveParseStruct = errors.New("recursively parsing struct") + + // ErrFuncTypeField field type is func. 
+ ErrFuncTypeField = errors.New("field type is func") + + // ErrFailedConvertPrimitiveType Failed to convert for swag to interpretable type. + ErrFailedConvertPrimitiveType = errors.New("swag property: failed convert primitive type") + + // ErrSkippedField .swaggo specifies field should be skipped. + ErrSkippedField = errors.New("field is skipped by global overrides") +) + +var allMethod = map[string]struct{}{ + http.MethodGet: {}, + http.MethodPut: {}, + http.MethodPost: {}, + http.MethodDelete: {}, + http.MethodOptions: {}, + http.MethodHead: {}, + http.MethodPatch: {}, +} + +// Parser implements a parser for Go source files. +type Parser struct { + // swagger represents the root document object for the API specification + swagger *spec.Swagger + + // openAPI represents the v3.1 root document object for the API specification + openAPI *openapi.OpenAPI + + // packages store entities of APIs, definitions, file, package path etc. and their relations + packages *PackagesDefinitions + + // parsedSchemas store schemas which have been parsed from ast.TypeSpec + parsedSchemas map[*TypeSpecDef]*Schema + + // parsedSchemasV3 store schemas which have been parsed from ast.TypeSpec + parsedSchemasV3 map[*TypeSpecDef]*SchemaV3 + + // outputSchemas store schemas which will be export to swagger + outputSchemas map[*TypeSpecDef]*Schema + + // outputSchemas store schemas which will be export to swagger + outputSchemasV3 map[*TypeSpecDef]*SchemaV3 + + // PropNamingStrategy naming strategy + PropNamingStrategy string + + // ParseVendor parse vendor folder + ParseVendor bool + + // ParseDependencies whether swag should be parse outside dependency folder: 0 none, 1 models, 2 operations, 3 all + ParseDependency ParseFlag + + // ParseInternal whether swag should parse internal packages + ParseInternal bool + + // Strict whether swag should error or warn when it detects cases which are most likely user errors + Strict bool + + // RequiredByDefault set validation required for all fields 
by default + RequiredByDefault bool + + // structStack stores full names of the structures that were already parsed or are being parsed now + structStack []*TypeSpecDef + + // markdownFileDir holds the path to the folder, where markdown files are stored + markdownFileDir string + + // codeExampleFilesDir holds path to the folder, where code example files are stored + codeExampleFilesDir string + + // collectionFormatInQuery set the default collectionFormat otherwise then 'csv' for array in query params + collectionFormatInQuery string + + // excludes excludes dirs and files in SearchDir + excludes map[string]struct{} + + // packagePrefix is a list of package path prefixes, packages that do not + // match any one of them will be excluded when searching. + packagePrefix []string + + // tells parser to include only specific extension + parseExtension string + + // debugging output goes here + debug Debugger + + // fieldParserFactory create FieldParser + fieldParserFactory FieldParserFactory + + // fieldParserFactoryV3 create FieldParser + fieldParserFactoryV3 FieldParserFactoryV3 + + // Overrides allows global replacements of types. A blank replacement will be skipped. + Overrides map[string]string + + // parseGoList whether swag use go list to parse dependency + parseGoList bool + + // tags to filter the APIs after + tags map[string]struct{} + + // HostState is the state of the host + HostState string + + // ParseFuncBody whether swag should parse api info inside of funcs + ParseFuncBody bool + + // use new openAPI version + openAPIVersion bool +} + +// FieldParserFactory create FieldParser. +type FieldParserFactory func(ps *Parser, field *ast.Field) FieldParser + +// FieldParser parse struct field. 
+type FieldParser interface { + ShouldSkip() bool + FieldNames() ([]string, error) + FormName() string + HeaderName() string + PathName() string + CustomSchema() (*spec.Schema, error) + ComplementSchema(schema *spec.Schema) error + IsRequired() (bool, error) +} + +// Debugger is the interface that wraps the basic Printf method. +type Debugger interface { + Printf(format string, v ...interface{}) +} + +// New creates a new Parser with default properties. +func New(options ...func(*Parser)) *Parser { + parser := &Parser{ + swagger: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Contact: &spec.ContactInfo{}, + License: nil, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, + }, + }, + Paths: &spec.Paths{ + Paths: make(map[string]spec.PathItem), + VendorExtensible: spec.VendorExtensible{ + Extensions: nil, + }, + }, + Definitions: make(map[string]spec.Schema), + SecurityDefinitions: make(map[string]*spec.SecurityScheme), + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: make(spec.Extensions), + }, + }, + openAPI: &openapi.OpenAPI{ + Info: openapi.NewInfo(), + OpenAPI: "3.1.0", + Components: openapi.NewComponents(), + ExternalDocs: openapi.NewExternalDocs(), + Paths: openapi.NewPaths(), + WebHooks: map[string]*openapi.RefOrSpec[openapi.Extendable[openapi.PathItem]]{}, + Security: []openapi.SecurityRequirement{}, + Tags: []*openapi.Extendable[openapi.Tag]{}, + Servers: []*openapi.Extendable[openapi.Server]{}, + }, + packages: NewPackagesDefinitions(), + debug: log.New(os.Stdout, "", log.LstdFlags), + parsedSchemas: make(map[*TypeSpecDef]*Schema), + parsedSchemasV3: make(map[*TypeSpecDef]*SchemaV3), + outputSchemas: make(map[*TypeSpecDef]*Schema), + outputSchemasV3: make(map[*TypeSpecDef]*SchemaV3), + excludes: make(map[string]struct{}), + tags: make(map[string]struct{}), + fieldParserFactory: newTagBaseFieldParser, + fieldParserFactoryV3: newTagBaseFieldParserV3, + Overrides: 
make(map[string]string), + } + + for _, option := range options { + option(parser) + } + + parser.packages.debug = parser.debug + + return parser +} + +// SetParseDependency sets whether to parse the dependent packages. +func SetParseDependency(parseDependency int) func(*Parser) { + return func(p *Parser) { + p.ParseDependency = ParseFlag(parseDependency) + if p.packages != nil { + p.packages.parseDependency = p.ParseDependency + } + } +} + +// SetMarkdownFileDirectory sets the directory to search for markdown files. +func SetMarkdownFileDirectory(directoryPath string) func(*Parser) { + return func(p *Parser) { + p.markdownFileDir = directoryPath + } +} + +// SetCodeExamplesDirectory sets the directory to search for code example files. +func SetCodeExamplesDirectory(directoryPath string) func(*Parser) { + return func(p *Parser) { + p.codeExampleFilesDir = directoryPath + } +} + +// SetExcludedDirsAndFiles sets directories and files to be excluded when searching. +func SetExcludedDirsAndFiles(excludes string) func(*Parser) { + return func(p *Parser) { + for _, f := range strings.Split(excludes, ",") { + f = strings.TrimSpace(f) + if f != "" { + f = filepath.Clean(f) + p.excludes[f] = struct{}{} + } + } + } +} + +// SetPackagePrefix sets a list of package path prefixes from a comma-separated +// string, packages that do not match any one of them will be excluded when +// searching. 
+func SetPackagePrefix(packagePrefix string) func(*Parser) { + return func(p *Parser) { + for _, f := range strings.Split(packagePrefix, ",") { + f = strings.TrimSpace(f) + if f != "" { + p.packagePrefix = append(p.packagePrefix, f) + } + } + } +} + +// SetTags sets the tags to be included +func SetTags(include string) func(*Parser) { + return func(p *Parser) { + for _, f := range strings.Split(include, ",") { + f = strings.TrimSpace(f) + if f != "" { + p.tags[f] = struct{}{} + } + } + } +} + +// SetParseExtension parses only those operations which match given extension +func SetParseExtension(parseExtension string) func(*Parser) { + return func(p *Parser) { + p.parseExtension = parseExtension + } +} + +// SetStrict sets whether swag should error or warn when it detects cases which are most likely user errors. +func SetStrict(strict bool) func(*Parser) { + return func(p *Parser) { + p.Strict = strict + } +} + +// SetDebugger allows the use of user-defined implementations. +func SetDebugger(logger Debugger) func(parser *Parser) { + return func(p *Parser) { + if logger != nil { + p.debug = logger + } + } +} + +// SetFieldParserFactory allows the use of user-defined implementations. +func SetFieldParserFactory(factory FieldParserFactory) func(parser *Parser) { + return func(p *Parser) { + p.fieldParserFactory = factory + } +} + +// SetOverrides allows the use of user-defined global type overrides. 
+func SetOverrides(overrides map[string]string) func(parser *Parser) { + return func(p *Parser) { + for k, v := range overrides { + p.Overrides[k] = v + } + } +} + +// SetCollectionFormat set default collection format +func SetCollectionFormat(collectionFormat string) func(*Parser) { + return func(p *Parser) { + p.collectionFormatInQuery = collectionFormat + } +} + +// ParseUsingGoList sets whether swag use go list to parse dependency +func ParseUsingGoList(enabled bool) func(parser *Parser) { + return func(p *Parser) { + p.parseGoList = enabled + } +} + +// GenerateOpenAPI3Doc parses only those operations which match given extension +func GenerateOpenAPI3Doc(enable bool) func(*Parser) { + return func(p *Parser) { + p.openAPIVersion = enable + } +} + +// ParseAPI parses general api info for given searchDir and mainAPIFile. +func (parser *Parser) ParseAPI(searchDir string, mainAPIFile string, parseDepth int) error { + return parser.ParseAPIMultiSearchDir([]string{searchDir}, mainAPIFile, parseDepth) +} + +// skipPackageByPrefix returns true the given pkgpath does not match +// any user-defined package path prefixes. +func (parser *Parser) skipPackageByPrefix(pkgpath string) bool { + if len(parser.packagePrefix) == 0 { + return false + } + for _, prefix := range parser.packagePrefix { + if strings.HasPrefix(pkgpath, prefix) { + return false + } + } + return true +} + +// ParseAPIMultiSearchDir is like ParseAPI but for multiple search dirs. 
+func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile string, parseDepth int) error { + for _, searchDir := range searchDirs { + parser.debug.Printf("Generate general API Info, search dir:%s", searchDir) + + packageDir, err := getPkgName(searchDir) + if err != nil { + parser.debug.Printf("warning: failed to get package name in dir: %s, error: %s", searchDir, err.Error()) + } + + err = parser.getAllGoFileInfo(packageDir, searchDir) + if err != nil { + return err + } + } + + absMainAPIFilePath, err := filepath.Abs(filepath.Join(searchDirs[0], mainAPIFile)) + if err != nil { + return err + } + + // Use 'go list' command instead of depth.Resolve() + if parser.ParseDependency > 0 { + if parser.parseGoList { + pkgs, err := listPackages(context.Background(), filepath.Dir(absMainAPIFilePath), nil, "-deps") + if err != nil { + return fmt.Errorf("pkg %s cannot find all dependencies, %s", filepath.Dir(absMainAPIFilePath), err) + } + + length := len(pkgs) + for i := 0; i < length; i++ { + err := parser.getAllGoFileInfoFromDepsByList(pkgs[i], parser.ParseDependency) + if err != nil { + return err + } + } + } else { + var t depth.Tree + t.ResolveInternal = true + t.MaxDepth = parseDepth + + pkgName, err := getPkgName(filepath.Dir(absMainAPIFilePath)) + if err != nil { + return err + } + + err = t.Resolve(pkgName) + if err != nil { + return fmt.Errorf("pkg %s cannot find all dependencies, %s", pkgName, err) + } + for i := 0; i < len(t.Root.Deps); i++ { + err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i], parser.ParseDependency) + if err != nil { + return err + } + } + } + } + + err = parser.ParseGeneralAPIInfo(absMainAPIFilePath) + if err != nil { + return err + } + + parser.parsedSchemas, err = parser.packages.ParseTypes() + if err != nil { + return err + } + + if parser.openAPIVersion { + err = parser.packages.RangeFiles(parser.ParseRouterAPIInfoV3) + if err != nil { + return err + } + } else { + err = 
parser.packages.RangeFiles(parser.ParseRouterAPIInfo) + if err != nil { + return err + } + } + + return parser.checkOperationIDUniqueness() +} + +func (parser *Parser) parseDeps(absMainAPIFilePath string, parseDepth int) error { + if parser.parseGoList { + pkgs, err := listPackages(context.Background(), filepath.Dir(absMainAPIFilePath), nil, "-deps") + if err != nil { + return fmt.Errorf("pkg %s cannot find all dependencies, %s", filepath.Dir(absMainAPIFilePath), err) + } + + length := len(pkgs) + for i := 0; i < length; i++ { + err := parser.getAllGoFileInfoFromDepsByList(pkgs[i], parser.ParseDependency) + if err != nil { + return err + } + } + } else { + var t depth.Tree + t.ResolveInternal = true + t.MaxDepth = parseDepth + + pkgName, err := getPkgName(absMainAPIFilePath) + if err != nil { + return errors.Wrap(err, "could not parse dependencies") + } + + if err := t.Resolve(pkgName); err != nil { + return errors.Wrap(fmt.Errorf("pkg %s cannot find all dependencies, %s", pkgName, err), "could not resolve dependencies") + } + + for i := 0; i < len(t.Root.Deps); i++ { + if err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i], parser.ParseDependency); err != nil { + return errors.Wrap(err, "could not parse dependencies") + } + } + } + + return nil +} + +func getPkgName(searchDir string) (string, error) { + cmd := exec.Command("go", "list", "-f={{.ImportPath}}") + cmd.Dir = searchDir + + var stdout, stderr strings.Builder + + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + fmt.Println("get pkg name for directory:", searchDir) + + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("execute go list command, %s, stdout:%s, stderr:%s", err, stdout.String(), stderr.String()) + } + + outStr, _ := stdout.String(), stderr.String() + + if outStr[0] == '_' { // will shown like _/{GOPATH}/src/{YOUR_PACKAGE} when NOT enable GO MODULE. 
+ outStr = strings.TrimPrefix(outStr, "_"+build.Default.GOPATH+"/src/") + } + + f := strings.Split(outStr, "\n") + + outStr = f[0] + + return outStr, nil +} + +// ParseGeneralAPIInfo parses general api info for given mainAPIFile path. +func (parser *Parser) ParseGeneralAPIInfo(mainAPIFile string) error { + fileSet := token.NewFileSet() + filePath := mainAPIFile + + fileTree, err := goparser.ParseFile(fileSet, filePath, nil, goparser.ParseComments) + if err != nil { + return fmt.Errorf("cannot parse source files %s: %s", filePath, err) + } + + parser.swagger.Swagger = "2.0" + + for i := range fileTree.Comments { + comment := fileTree.Comments[i] + if !isGeneralAPIComment(comment.Text()) { + continue + } + + comments := strings.Split(comment.Text(), "\n") + + if parser.openAPIVersion { + err = parser.parseGeneralAPIInfoV3(comments) + if err != nil { + return err + } + + continue + } + + err = parseGeneralAPIInfo(parser, comments) + if err != nil { + return err + } + + } + + return nil +} + +func parseGeneralAPIInfo(parser *Parser, comments []string) error { + previousAttribute := "" + var tag *spec.Tag + // parsing classic meta data model + for line := 0; line < len(comments); line++ { + commentLine := comments[line] + commentLine = strings.TrimSpace(commentLine) + if len(commentLine) == 0 { + continue + } + fields := FieldsByAnySpace(commentLine, 2) + + attribute := fields[0] + var value string + if len(fields) > 1 { + value = fields[1] + } + + switch attr := strings.ToLower(attribute); attr { + case versionAttr, titleAttr, tosAttr, licNameAttr, licURLAttr, conNameAttr, conURLAttr, conEmailAttr: + setSwaggerInfo(parser.swagger, attr, value) + case descriptionAttr: + if previousAttribute == attribute { + parser.swagger.Info.Description += "\n" + value + + continue + } + + setSwaggerInfo(parser.swagger, attr, value) + case descriptionMarkdownAttr: + commentInfo, err := getMarkdownForTag("api", parser.markdownFileDir) + if err != nil { + return err + } + + 
setSwaggerInfo(parser.swagger, descriptionAttr, string(commentInfo)) + + case "@host": + parser.swagger.Host = value + case "@hoststate": + fields = FieldsByAnySpace(commentLine, 3) + if len(fields) != 3 { + return fmt.Errorf("%s needs 3 arguments", attribute) + } + if parser.HostState == fields[1] { + parser.swagger.Host = fields[2] + } + case "@basepath": + parser.swagger.BasePath = value + + case acceptAttr: + err := parser.ParseAcceptComment(value) + if err != nil { + return err + } + case produceAttr: + err := parser.ParseProduceComment(value) + if err != nil { + return err + } + case "@schemes": + parser.swagger.Schemes = strings.Split(value, " ") + case "@tag.name": + if parser.matchTag(value) { + parser.swagger.Tags = append(parser.swagger.Tags, spec.Tag{ + TagProps: spec.TagProps{ + Name: value, + }, + }) + tag = &parser.swagger.Tags[len(parser.swagger.Tags)-1] + } else { + tag = nil + } + case "@tag.description": + if tag != nil { + tag.TagProps.Description = value + } + case "@tag.description.markdown": + if tag != nil { + commentInfo, err := getMarkdownForTag(tag.TagProps.Name, parser.markdownFileDir) + if err != nil { + return err + } + + tag.TagProps.Description = string(commentInfo) + } + case "@tag.docs.url": + if tag != nil { + tag.TagProps.ExternalDocs = &spec.ExternalDocumentation{ + URL: value, + } + } + case "@tag.docs.description": + if tag != nil { + if tag.TagProps.ExternalDocs == nil { + return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) + } + + tag.TagProps.ExternalDocs.Description = value + } + case secBasicAttr, secAPIKeyAttr, secApplicationAttr, secImplicitAttr, secPasswordAttr, secAccessCodeAttr: + scheme, err := parseSecAttributes(attribute, comments, &line) + if err != nil { + return err + } + + parser.swagger.SecurityDefinitions[value] = scheme + + case securityAttr: + parser.swagger.Security = append(parser.swagger.Security, parseSecurity(value)) + + case "@query.collection.format": + 
parser.collectionFormatInQuery = TransToValidCollectionFormat(value) + + case extDocsDescAttr, extDocsURLAttr: + if parser.swagger.ExternalDocs == nil { + parser.swagger.ExternalDocs = new(spec.ExternalDocumentation) + } + switch attr { + case extDocsDescAttr: + parser.swagger.ExternalDocs.Description = value + case extDocsURLAttr: + parser.swagger.ExternalDocs.URL = value + } + + case "@x-taggroups": + originalAttribute := strings.Split(commentLine, " ")[0] + if len(value) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + var valueJSON interface{} + if err := json.Unmarshal([]byte(value), &valueJSON); err != nil { + return fmt.Errorf("annotation %s need a valid json value. error: %s", originalAttribute, err.Error()) + } + + parser.swagger.Extensions[originalAttribute[1:]] = valueJSON // don't use the method provided by spec lib, cause it will call toLower() on attribute names, which is wrongy + default: + if strings.HasPrefix(attribute, "@x-") { + extensionName := attribute[1:] + + extExistsInSecurityDef := false + // for each security definition + for _, v := range parser.swagger.SecurityDefinitions { + // check if extension exists + _, extExistsInSecurityDef = v.VendorExtensible.Extensions.GetString(extensionName) + // if it exists in at least one, then we stop iterating + if extExistsInSecurityDef { + break + } + } + + // if it is present on security def, don't add it again + if extExistsInSecurityDef { + break + } + + if len(value) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + var valueJSON interface{} + err := json.Unmarshal([]byte(value), &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value. 
error: %s", attribute, err.Error()) + } + + if strings.Contains(extensionName, "logo") { + parser.swagger.Info.Extensions.Add(extensionName, valueJSON) + } else { + if parser.swagger.Extensions == nil { + parser.swagger.Extensions = make(map[string]interface{}) + } + + parser.swagger.Extensions[attribute[1:]] = valueJSON + } + } + } + + previousAttribute = attribute + } + + return nil +} + +func setSwaggerInfo(swagger *spec.Swagger, attribute, value string) { + switch attribute { + case versionAttr: + swagger.Info.Version = value + case titleAttr: + swagger.Info.Title = value + case tosAttr: + swagger.Info.TermsOfService = value + case descriptionAttr: + swagger.Info.Description = value + case conNameAttr: + swagger.Info.Contact.Name = value + case conEmailAttr: + swagger.Info.Contact.Email = value + case conURLAttr: + swagger.Info.Contact.URL = value + case licNameAttr: + swagger.Info.License = initIfEmpty(swagger.Info.License) + swagger.Info.License.Name = value + case licURLAttr: + swagger.Info.License = initIfEmpty(swagger.Info.License) + swagger.Info.License.URL = value + } +} + +func parseSecAttributes(context string, lines []string, index *int) (*spec.SecurityScheme, error) { + const ( + in = "@in" + name = "@name" + descriptionAttr = "@description" + tokenURL = "@tokenurl" + authorizationURL = "@authorizationurl" + ) + + var search []string + + attribute := strings.ToLower(FieldsByAnySpace(lines[*index], 2)[0]) + switch attribute { + case secBasicAttr: + return spec.BasicAuth(), nil + case secAPIKeyAttr: + search = []string{in, name} + case secApplicationAttr, secPasswordAttr: + search = []string{tokenURL} + case secImplicitAttr: + search = []string{authorizationURL} + case secAccessCodeAttr: + search = []string{tokenURL, authorizationURL} + } + + // For the first line we get the attributes in the context parameter, so we skip to the next one + *index++ + + attrMap, scopes := make(map[string]string), make(map[string]string) + extensions, description := 
make(map[string]interface{}), "" + +loopline: + for ; *index < len(lines); *index++ { + v := strings.TrimSpace(lines[*index]) + if len(v) == 0 { + continue + } + + fields := FieldsByAnySpace(v, 2) + securityAttr := strings.ToLower(fields[0]) + + var value string + if len(fields) > 1 { + value = fields[1] + } + + for _, findTerm := range search { + if securityAttr == findTerm { + attrMap[securityAttr] = value + continue loopline + } + } + + if isExists, err := isExistsScope(securityAttr); err != nil { + return nil, err + } else if isExists { + scopes[securityAttr[len(scopeAttrPrefix):]] = value + continue + } + + if strings.HasPrefix(securityAttr, "@x-") { + // Add the custom attribute without the @ + extensions[securityAttr[1:]] = value + continue + } + + // Not mandatory field + if securityAttr == descriptionAttr { + if description != "" { + description += "\n" + } + description += value + } + + // next securityDefinitions + if strings.Index(securityAttr, "@securitydefinitions.") == 0 { + // Go back to the previous line and break + *index-- + + break + } + } + + if len(attrMap) != len(search) { + return nil, fmt.Errorf("%s is %v required", context, search) + } + + var scheme *spec.SecurityScheme + + switch attribute { + case secAPIKeyAttr: + scheme = spec.APIKeyAuth(attrMap[name], attrMap[in]) + case secApplicationAttr: + scheme = spec.OAuth2Application(attrMap[tokenURL]) + case secImplicitAttr: + scheme = spec.OAuth2Implicit(attrMap[authorizationURL]) + case secPasswordAttr: + scheme = spec.OAuth2Password(attrMap[tokenURL]) + case secAccessCodeAttr: + scheme = spec.OAuth2AccessToken(attrMap[authorizationURL], attrMap[tokenURL]) + } + + scheme.Description = description + + for extKey, extValue := range extensions { + scheme.AddExtension(extKey, extValue) + } + + for scope, scopeDescription := range scopes { + scheme.AddScope(scope, scopeDescription) + } + + return scheme, nil +} + +func parseSecurity(commentLine string) map[string][]string { + securityMap := 
make(map[string][]string) + + for _, securityOption := range strings.Split(commentLine, "||") { + securityOption = strings.TrimSpace(securityOption) + + left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") + + if !(left == -1 && right == -1) { + scopes := securityOption[left+1 : right] + + var options []string + + for _, scope := range strings.Split(scopes, ",") { + options = append(options, strings.TrimSpace(scope)) + } + + securityKey := securityOption[0:left] + securityMap[securityKey] = append(securityMap[securityKey], options...) + } else { + securityKey := strings.TrimSpace(securityOption) + securityMap[securityKey] = []string{} + } + } + + return securityMap +} + +func initIfEmpty(license *spec.License) *spec.License { + if license == nil { + return new(spec.License) + } + + return license +} + +// ParseAcceptComment parses comment for given `accept` comment string. +func (parser *Parser) ParseAcceptComment(commentLine string) error { + return parseMimeTypeList(commentLine, &parser.swagger.Consumes, "%v accept type can't be accepted") +} + +// ParseProduceComment parses comment for given `produce` comment string. 
+func (parser *Parser) ParseProduceComment(commentLine string) error { + return parseMimeTypeList(commentLine, &parser.swagger.Produces, "%v produce type can't be accepted") +} + +func isGeneralAPIComment(comment string) bool { + // for _, commentLine := range comments { + commentLine := strings.TrimSpace(comment) + if len(commentLine) == 0 { + return false + } + + attribute := strings.ToLower(FieldsByAnySpace(commentLine, 2)[0]) + switch attribute { + // The @summary, @router, @success, @failure annotation belongs to Operation + case summaryAttr, routerAttr, successAttr, failureAttr, responseAttr: + return false + } + // } + + return true +} + +func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { + if tagName == "" { + // this happens when parsing the @description.markdown attribute + // it will be called properly another time with tagName="api" + // so we can safely return an empty byte slice here + return make([]byte, 0), nil + } + + dirEntries, err := os.ReadDir(dirPath) + if err != nil { + return nil, err + } + + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + + fileName := entry.Name() + + expectedFileName := tagName + if !strings.HasSuffix(tagName, ".md") { + expectedFileName = tagName + ".md" + } + + if fileName == expectedFileName { + fullPath := filepath.Join(dirPath, fileName) + + commentInfo, err := os.ReadFile(fullPath) + if err != nil { + return nil, fmt.Errorf("Failed to read markdown file %s error: %s ", fullPath, err) + } + + return commentInfo, nil + } + } + + return nil, fmt.Errorf("Unable to find markdown file for tag %s in the given directory", tagName) +} + +func isExistsScope(scope string) (bool, error) { + s := strings.Fields(scope) + for _, v := range s { + if strings.HasPrefix(v, scopeAttrPrefix) { + if strings.Contains(v, ",") { + return false, fmt.Errorf("@scope can't use comma(,) get=" + v) + } + } + } + + return strings.HasPrefix(scope, scopeAttrPrefix), nil +} + +func 
getTagsFromComment(comment string) (tags []string) { + commentLine := strings.TrimSpace(strings.TrimLeft(comment, "/")) + if len(commentLine) == 0 { + return nil + } + + attribute := strings.Fields(commentLine)[0] + lineRemainder, lowerAttribute := strings.TrimSpace(commentLine[len(attribute):]), strings.ToLower(attribute) + + if lowerAttribute == tagsAttr { + for _, tag := range strings.Split(lineRemainder, ",") { + tags = append(tags, strings.TrimSpace(tag)) + } + } + return + +} + +func (parser *Parser) matchTag(tag string) bool { + if len(parser.tags) == 0 { + return true + } + + if _, has := parser.tags["!"+tag]; has { + return false + } + if _, has := parser.tags[tag]; has { + return true + } + + // If all tags are negation then we should return true + for key := range parser.tags { + if key[0] != '!' { + return false + } + } + return true +} + +func (parser *Parser) matchTags(comments []*ast.Comment) (match bool) { + if len(parser.tags) == 0 { + return true + } + + match = false + for _, comment := range comments { + for _, tag := range getTagsFromComment(comment.Text) { + if _, has := parser.tags["!"+tag]; has { + return false + } + if _, has := parser.tags[tag]; has { + match = true // keep iterating as it may contain a tag that is excluded + } + } + } + + if !match { + // If all tags are negation then we should return true + for key := range parser.tags { + if key[0] != '!' 
{ + return false + } + } + } + + return true +} + +func matchExtension(extensionToMatch string, comments []*ast.Comment) (match bool) { + if len(extensionToMatch) == 0 { + return true + } + + for _, comment := range comments { + commentLine := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + fields := FieldsByAnySpace(commentLine, 2) + if len(fields) > 0 { + lowerAttribute := strings.ToLower(fields[0]) + + if lowerAttribute == fmt.Sprintf("@x-%s", strings.ToLower(extensionToMatch)) { + return true + } + } + } + + return false +} + +// ParseRouterAPIInfo parses router api info for given astFile. +func (parser *Parser) ParseRouterAPIInfo(fileInfo *AstFileInfo) error { + if (fileInfo.ParseFlag & ParseOperations) == ParseNone { + return nil + } + + // parse File.Comments instead of File.Decls.Doc if ParseFuncBody flag set to "true" + if parser.ParseFuncBody { + for _, astComments := range fileInfo.File.Comments { + if astComments.List != nil { + if err := parser.parseRouterAPIInfoComment(astComments.List, fileInfo); err != nil { + return err + } + } + } + + return nil + } + + for _, astDescription := range fileInfo.File.Decls { + astDeclaration, ok := astDescription.(*ast.FuncDecl) + if ok && astDeclaration.Doc != nil && astDeclaration.Doc.List != nil { + if err := parser.parseRouterAPIInfoComment(astDeclaration.Doc.List, fileInfo); err != nil { + return err + } + } + } + + return nil +} + +func (parser *Parser) parseRouterAPIInfoComment(comments []*ast.Comment, fileInfo *AstFileInfo) error { + if parser.matchTags(comments) && matchExtension(parser.parseExtension, comments) { + // for per 'function' comment, create a new 'Operation' object + operation := NewOperation(parser, SetCodeExampleFilesDirectory(parser.codeExampleFilesDir)) + for _, comment := range comments { + err := operation.ParseComment(comment.Text, fileInfo.File) + if err != nil { + return fmt.Errorf("ParseComment error in file %s :%+v", fileInfo.Path, err) + } + if operation.State != "" && 
operation.State != parser.HostState { + return nil + } + } + err := processRouterOperation(parser, operation) + if err != nil { + return err + } + } + + return nil +} + +func refRouteMethodOp(item *spec.PathItem, method string) (op **spec.Operation) { + switch method { + case http.MethodGet: + op = &item.Get + case http.MethodPost: + op = &item.Post + case http.MethodDelete: + op = &item.Delete + case http.MethodPut: + op = &item.Put + case http.MethodPatch: + op = &item.Patch + case http.MethodHead: + op = &item.Head + case http.MethodOptions: + op = &item.Options + } + + return +} + +func processRouterOperation(parser *Parser, operation *Operation) error { + for _, routeProperties := range operation.RouterProperties { + var ( + pathItem spec.PathItem + ok bool + ) + + pathItem, ok = parser.swagger.Paths.Paths[routeProperties.Path] + if !ok { + pathItem = spec.PathItem{} + } + + op := refRouteMethodOp(&pathItem, routeProperties.HTTPMethod) + + // check if we already have an operation for this path and method + if *op != nil { + err := fmt.Errorf("route %s %s is declared multiple times", routeProperties.HTTPMethod, routeProperties.Path) + if parser.Strict { + return err + } + + parser.debug.Printf("warning: %s\n", err) + } + + if len(operation.RouterProperties) > 1 { + newOp := *operation + var validParams []spec.Parameter + for _, param := range newOp.Operation.OperationProps.Parameters { + if param.In == "path" && !strings.Contains(routeProperties.Path, param.Name) { + // This path param is not actually contained in the path, skip adding it to the final params + continue + } + validParams = append(validParams, param) + } + newOp.Operation.OperationProps.Parameters = validParams + *op = &newOp.Operation + } else { + *op = &operation.Operation + } + + if routeProperties.Deprecated { + (*op).Deprecated = routeProperties.Deprecated + } + + parser.swagger.Paths.Paths[routeProperties.Path] = pathItem + } + + return nil +} + +func convertFromSpecificToPrimitive(typeName 
string) (string, error) { + name := typeName + if strings.ContainsRune(name, '.') { + name = strings.Split(name, ".")[1] + } + + switch strings.ToUpper(name) { + case "TIME", "OBJECTID", "UUID": + return STRING, nil + case "DECIMAL": + return NUMBER, nil + } + + return typeName, ErrFailedConvertPrimitiveType +} + +func (parser *Parser) getTypeSchema(typeName string, file *ast.File, ref bool) (*spec.Schema, error) { + if override, ok := parser.Overrides[typeName]; ok { + parser.debug.Printf("Override detected for %s: using %s instead", typeName, override) + return parseObjectSchema(parser, override, file) + } + + if IsInterfaceLike(typeName) { + return &spec.Schema{}, nil + } + if IsGolangPrimitiveType(typeName) { + return PrimitiveSchema(TransToValidSchemeType(typeName)), nil + } + + schemaType, err := convertFromSpecificToPrimitive(typeName) + if err == nil { + return PrimitiveSchema(schemaType), nil + } + + typeSpecDef := parser.packages.FindTypeSpec(typeName, file) + if typeSpecDef == nil { + parser.packages.FindTypeSpec(typeName, file) // uncomment for debugging + return nil, fmt.Errorf("cannot find type definition: %s", typeName) + } + + if override, ok := parser.Overrides[typeSpecDef.FullPath()]; ok { + if override == "" { + parser.debug.Printf("Override detected for %s: ignoring", typeSpecDef.FullPath()) + + return nil, ErrSkippedField + } + + parser.debug.Printf("Override detected for %s: using %s instead", typeSpecDef.FullPath(), override) + + separator := strings.LastIndex(override, ".") + if separator == -1 { + // treat as a swaggertype tag + parts := strings.Split(override, ",") + + return BuildCustomSchema(parts) + } + + typeSpecDef = parser.packages.findTypeSpec(override[0:separator], override[separator+1:]) + } + + schema, ok := parser.parsedSchemas[typeSpecDef] + if !ok { + var err error + + schema, err = parser.ParseDefinition(typeSpecDef) + if err != nil { + if err == ErrRecursiveParseStruct && ref { + return parser.getRefTypeSchema(typeSpecDef, 
schema), nil + } + return nil, fmt.Errorf("%s: %w", typeName, err) + } + } + + if ref { + if IsComplexSchema(schema.Schema) { + return parser.getRefTypeSchema(typeSpecDef, schema), nil + } + // if it is a simple schema, just return a copy + newSchema := *schema.Schema + return &newSchema, nil + } + + return schema.Schema, nil +} + +func (parser *Parser) getRefTypeSchema(typeSpecDef *TypeSpecDef, schema *Schema) *spec.Schema { + _, ok := parser.outputSchemas[typeSpecDef] + if !ok { + parser.swagger.Definitions[schema.Name] = spec.Schema{} + + if schema.Schema != nil { + parser.swagger.Definitions[schema.Name] = *schema.Schema + } + + parser.outputSchemas[typeSpecDef] = schema + } + + refSchema := RefSchema(schema.Name) + + return refSchema +} + +func (parser *Parser) isInStructStack(typeSpecDef *TypeSpecDef) bool { + for _, specDef := range parser.structStack { + if typeSpecDef == specDef { + return true + } + } + + return false +} + +// ParseDefinition parses given type spec that corresponds to the type under +// given name and package, and populates swagger schema definitions registry +// with a schema for the given type +func (parser *Parser) ParseDefinition(typeSpecDef *TypeSpecDef) (*Schema, error) { + typeName := typeSpecDef.TypeName() + schema, found := parser.parsedSchemas[typeSpecDef] + if found { + parser.debug.Printf("Skipping '%s', already parsed.", typeName) + + return schema, nil + } + + if parser.isInStructStack(typeSpecDef) { + parser.debug.Printf("Skipping '%s', recursion detected.", typeName) + + return &Schema{ + Name: typeName, + PkgPath: typeSpecDef.PkgPath, + Schema: PrimitiveSchema(OBJECT), + }, + ErrRecursiveParseStruct + } + + parser.structStack = append(parser.structStack, typeSpecDef) + + parser.debug.Printf("Generating %s", typeName) + + definition, err := parser.parseTypeExpr(typeSpecDef.File, typeSpecDef.TypeSpec.Type, false) + if err != nil { + parser.debug.Printf("Error parsing type definition '%s': %s", typeName, err) + return nil, 
err + } + + if definition.Description == "" { + err = parser.fillDefinitionDescription(definition, typeSpecDef.File, typeSpecDef) + if err != nil { + return nil, err + } + } + + if len(typeSpecDef.Enums) > 0 { + var varnames []string + var enumComments = make(map[string]string) + for _, value := range typeSpecDef.Enums { + definition.Enum = append(definition.Enum, value.Value) + varnames = append(varnames, value.key) + if len(value.Comment) > 0 { + enumComments[value.key] = value.Comment + } + } + if definition.Extensions == nil { + definition.Extensions = make(spec.Extensions) + } + definition.Extensions[enumVarNamesExtension] = varnames + if len(enumComments) > 0 { + definition.Extensions[enumCommentsExtension] = enumComments + } + } + + schemaName := typeName + + if typeSpecDef.SchemaName != "" { + schemaName = typeSpecDef.SchemaName + } + + sch := Schema{ + Name: schemaName, + PkgPath: typeSpecDef.PkgPath, + Schema: definition, + } + parser.parsedSchemas[typeSpecDef] = &sch + + // update an empty schema as a result of recursion + s2, found := parser.outputSchemas[typeSpecDef] + if found { + parser.swagger.Definitions[s2.Name] = *definition + } + + return &sch, nil +} + +func fullTypeName(parts ...string) string { + return strings.Join(parts, ".") +} + +// fillDefinitionDescription additionally fills fields in definition (spec.Schema) +// TODO: If .go file contains many types, it may work for a long time +func (parser *Parser) fillDefinitionDescription(definition *spec.Schema, file *ast.File, typeSpecDef *TypeSpecDef) (err error) { + if file == nil { + return + } + for _, astDeclaration := range file.Decls { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if !ok || generalDeclaration.Tok != token.TYPE { + continue + } + + for _, astSpec := range generalDeclaration.Specs { + typeSpec, ok := astSpec.(*ast.TypeSpec) + if !ok || typeSpec != typeSpecDef.TypeSpec { + continue + } + var typeName string + if typeSpec.Name != nil { + typeName = 
typeSpec.Name.Name + } + definition.Description, err = + parser.extractDeclarationDescription(typeName, typeSpec.Doc, typeSpec.Comment, generalDeclaration.Doc) + if err != nil { + return + } + } + } + return nil +} + +// extractDeclarationDescription gets first description +// from attribute descriptionAttr in commentGroups (ast.CommentGroup) +func (parser *Parser) extractDeclarationDescription(typeName string, commentGroups ...*ast.CommentGroup) (string, error) { + var description string + + for _, commentGroup := range commentGroups { + if commentGroup == nil { + continue + } + + isHandlingDescription := false + + for _, comment := range commentGroup.List { + commentText := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + if len(commentText) == 0 { + continue + } + fields := FieldsByAnySpace(commentText, 2) + attribute := fields[0] + + if attr := strings.ToLower(attribute); attr == descriptionMarkdownAttr { + if len(fields) > 1 { + typeName = fields[1] + } + if typeName == "" { + continue + } + desc, err := getMarkdownForTag(typeName, parser.markdownFileDir) + if err != nil { + return "", err + } + // if found markdown description, we will only use the markdown file content + return string(desc), nil + } else if attr != descriptionAttr { + if !isHandlingDescription { + continue + } + + break + } + + isHandlingDescription = true + description += " " + strings.TrimSpace(commentText[len(attribute):]) + } + } + + return strings.TrimLeft(description, " "), nil +} + +// parseTypeExpr parses given type expression that corresponds to the type under +// given name and package, and returns swagger schema for it. 
+func (parser *Parser) parseTypeExpr(file *ast.File, typeExpr ast.Expr, ref bool) (*spec.Schema, error) { + switch expr := typeExpr.(type) { + // type Foo interface{} + case *ast.InterfaceType: + return &spec.Schema{}, nil + + // type Foo struct {...} + case *ast.StructType: + return parser.parseStruct(file, expr.Fields) + + // type Foo Baz + case *ast.Ident: + return parser.getTypeSchema(expr.Name, file, ref) + + // type Foo *Baz + case *ast.StarExpr: + return parser.parseTypeExpr(file, expr.X, ref) + + // type Foo pkg.Bar + case *ast.SelectorExpr: + if xIdent, ok := expr.X.(*ast.Ident); ok { + return parser.getTypeSchema(fullTypeName(xIdent.Name, expr.Sel.Name), file, ref) + } + // type Foo []Baz + case *ast.ArrayType: + itemSchema, err := parser.parseTypeExpr(file, expr.Elt, true) + if err != nil { + return nil, err + } + + return spec.ArrayProperty(itemSchema), nil + // type Foo map[string]Bar + case *ast.MapType: + if _, ok := expr.Value.(*ast.InterfaceType); ok { + return spec.MapProperty(nil), nil + } + schema, err := parser.parseTypeExpr(file, expr.Value, true) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil + + case *ast.FuncType: + return nil, ErrFuncTypeField + // ... + } + + return parser.parseGenericTypeExpr(file, typeExpr) +} + +func (parser *Parser) parseStruct(file *ast.File, fields *ast.FieldList) (*spec.Schema, error) { + required, properties := make([]string, 0), make(map[string]spec.Schema) + + for _, field := range fields.List { + fieldProps, requiredFromAnon, err := parser.parseStructField(file, field) + if err != nil { + if errors.Is(err, ErrFuncTypeField) || errors.Is(err, ErrSkippedField) { + continue + } + + return nil, err + } + + if len(fieldProps) == 0 { + continue + } + + required = append(required, requiredFromAnon...) 
+ + for k, v := range fieldProps { + properties[k] = v + } + } + + sort.Strings(required) + + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{OBJECT}, + Properties: properties, + Required: required, + }, + }, nil +} + +func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[string]spec.Schema, []string, error) { + if field.Tag != nil { + skip, ok := reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")).Lookup("swaggerignore") + if ok && strings.EqualFold(skip, "true") { + return nil, nil, nil + } + } + + ps := parser.fieldParserFactory(parser, field) + + if ps.ShouldSkip() { + return nil, nil, nil + } + + fieldNames, err := ps.FieldNames() + if err != nil { + return nil, nil, err + } + + if len(fieldNames) == 0 { + typeName, err := getFieldType(file, field.Type, nil) + if err != nil { + return nil, nil, err + } + + schema, err := parser.getTypeSchema(typeName, file, false) + if err != nil { + return nil, nil, err + } + + if len(schema.Type) > 0 && schema.Type[0] == OBJECT { + if len(schema.Properties) == 0 { + return nil, nil, nil + } + + properties := map[string]spec.Schema{} + for k, v := range schema.Properties { + properties[k] = v + } + + return properties, schema.SchemaProps.Required, nil + } + // for alias type of non-struct types ,such as array,map, etc. ignore field tag. 
+ return map[string]spec.Schema{typeName: *schema}, nil, nil + + } + + schema, err := ps.CustomSchema() + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + + if schema == nil { + typeName, err := getFieldType(file, field.Type, nil) + if err == nil { + // named type + schema, err = parser.getTypeSchema(typeName, file, true) + } else { + // unnamed type + schema, err = parser.parseTypeExpr(file, field.Type, false) + } + + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + } + + err = ps.ComplementSchema(schema) + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + + var tagRequired []string + + required, err := ps.IsRequired() + if err != nil { + return nil, nil, fmt.Errorf("%v: %w", fieldNames, err) + } + + if required { + tagRequired = append(tagRequired, fieldNames...) + } + + if schema.Extensions == nil { + schema.Extensions = make(spec.Extensions) + } + if formName := ps.FormName(); len(formName) > 0 { + schema.Extensions["formData"] = formName + } + if headerName := ps.HeaderName(); len(headerName) > 0 { + schema.Extensions["header"] = headerName + } + if pathName := ps.PathName(); len(pathName) > 0 { + schema.Extensions["path"] = pathName + } + fields := make(map[string]spec.Schema) + for _, name := range fieldNames { + fields[name] = *schema + } + return fields, tagRequired, nil +} + +func getFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { + switch fieldType := field.(type) { + case *ast.Ident: + return fieldType.Name, nil + case *ast.SelectorExpr: + packageName, err := getFieldType(file, fieldType.X, genericParamTypeDefs) + if err != nil { + return "", err + } + + return fullTypeName(packageName, fieldType.Sel.Name), nil + case *ast.StarExpr: + fullName, err := getFieldType(file, fieldType.X, genericParamTypeDefs) + if err != nil { + return "", err + } + + return fullName, nil + default: + return 
getGenericFieldType(file, field, genericParamTypeDefs) + } +} + +func (parser *Parser) getUnderlyingSchema(schema *spec.Schema) *spec.Schema { + if schema == nil { + return nil + } + + if url := schema.Ref.GetURL(); url != nil { + if pos := strings.LastIndexByte(url.Fragment, '/'); pos >= 0 { + name := url.Fragment[pos+1:] + if schema, ok := parser.swagger.Definitions[name]; ok { + return &schema + } + } + } + + if len(schema.AllOf) > 0 { + merged := &spec.Schema{} + MergeSchema(merged, schema) + for _, s := range schema.AllOf { + MergeSchema(merged, parser.getUnderlyingSchema(&s)) + } + return merged + } + return nil +} + +// GetSchemaTypePath get path of schema type. +func (parser *Parser) GetSchemaTypePath(schema *spec.Schema, depth int) []string { + if schema == nil || depth == 0 { + return nil + } + + if underlying := parser.getUnderlyingSchema(schema); underlying != nil { + return parser.GetSchemaTypePath(underlying, depth) + } + + if len(schema.Type) > 0 { + switch schema.Type[0] { + case ARRAY: + depth-- + + s := []string{schema.Type[0]} + + return append(s, parser.GetSchemaTypePath(schema.Items.Schema, depth)...) + case OBJECT: + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + // for map + depth-- + + s := []string{schema.Type[0]} + + return append(s, parser.GetSchemaTypePath(schema.AdditionalProperties.Schema, depth)...) + } + } + + return []string{schema.Type[0]} + } + + return []string{ANY} +} + +// defineTypeOfExample example value define the type (object and array unsupported). 
+func defineTypeOfExample(schemaType, arrayType, exampleValue string) (interface{}, error) { + switch schemaType { + case STRING: + return exampleValue, nil + case NUMBER: + v, err := strconv.ParseFloat(exampleValue, 64) + if err != nil { + return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) + } + + return v, nil + case INTEGER: + v, err := strconv.Atoi(exampleValue) + if err != nil { + return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) + } + + return v, nil + case BOOLEAN: + v, err := strconv.ParseBool(exampleValue) + if err != nil { + return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) + } + + return v, nil + case ARRAY: + values := strings.Split(exampleValue, ",") + result := make([]interface{}, 0) + for _, value := range values { + v, err := defineTypeOfExample(arrayType, "", value) + if err != nil { + return nil, err + } + + result = append(result, v) + } + + return result, nil + case OBJECT: + if arrayType == "" { + return nil, fmt.Errorf("%s is unsupported type in example value `%s`", schemaType, exampleValue) + } + + values := strings.Split(exampleValue, ",") + + result := map[string]interface{}{} + + for _, value := range values { + mapData := strings.SplitN(value, ":", 2) + + if len(mapData) == 2 { + v, err := defineTypeOfExample(arrayType, "", mapData[1]) + if err != nil { + return nil, err + } + + result[mapData[0]] = v + + continue + } + + return nil, fmt.Errorf("example value %s should format: key:value", exampleValue) + } + + return result, nil + case ANY: + return exampleValue, nil + } + + return nil, fmt.Errorf("%s is unsupported type in example value %s", schemaType, exampleValue) +} + +// GetAllGoFileInfo gets all Go source files information for given searchDir. 
+func (parser *Parser) getAllGoFileInfo(packageDir, searchDir string) error { + if parser.skipPackageByPrefix(packageDir) { + return nil // ignored by user-defined package path prefixes + } + return filepath.Walk(searchDir, func(path string, f os.FileInfo, _ error) error { + err := parser.Skip(path, f) + if err != nil { + return err + } + + if f.IsDir() { + return nil + } + + relPath, err := filepath.Rel(searchDir, path) + if err != nil { + return err + } + + return parser.parseFile(filepath.ToSlash(filepath.Dir(filepath.Clean(filepath.Join(packageDir, relPath)))), path, nil, ParseAll) + }) +} + +func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg, parseFlag ParseFlag) error { + ignoreInternal := pkg.Internal && !parser.ParseInternal + if ignoreInternal || !pkg.Resolved { // ignored internal and not resolved dependencies + return nil + } + + if pkg.Raw != nil && parser.skipPackageByPrefix(pkg.Raw.ImportPath) { + return nil // ignored by user-defined package path prefixes + } + + // Skip cgo + if pkg.Raw == nil && pkg.Name == "C" { + return nil + } + + srcDir := pkg.Raw.Dir + + files, err := os.ReadDir(srcDir) // only parsing files in the dir(don't contain sub dir files) + if err != nil { + return err + } + + for _, f := range files { + if f.IsDir() { + continue + } + + path := filepath.Join(srcDir, f.Name()) + if err := parser.parseFile(pkg.Name, path, nil, parseFlag); err != nil { + return err + } + } + + for i := 0; i < len(pkg.Deps); i++ { + if err := parser.getAllGoFileInfoFromDeps(&pkg.Deps[i], parseFlag); err != nil { + return err + } + } + + return nil +} + +func (parser *Parser) parseFile(packageDir, path string, src interface{}, flag ParseFlag) error { + if strings.HasSuffix(strings.ToLower(path), "_test.go") || filepath.Ext(path) != ".go" { + return nil + } + + return parser.packages.ParseFile(packageDir, path, src, flag) +} + +func (parser *Parser) checkOperationIDUniqueness() error { + // operationsIds contains all operationId annotations to 
check it's unique + operationsIds := make(map[string]string) + + for path, item := range parser.swagger.Paths.Paths { + var method, id string + + for method = range allMethod { + op := refRouteMethodOp(&item, method) + if *op != nil { + id = (**op).ID + + break + } + } + + if id == "" { + continue + } + + current := fmt.Sprintf("%s %s", method, path) + + previous, ok := operationsIds[id] + if ok { + return fmt.Errorf( + "duplicated @id annotation '%s' found in '%s', previously declared in: '%s'", + id, current, previous) + } + + operationsIds[id] = current + } + + return nil +} + +// Skip returns filepath.SkipDir error if match vendor and hidden folder. +func (parser *Parser) Skip(path string, f os.FileInfo) error { + return walkWith(parser.excludes, parser.ParseVendor)(path, f) +} + +func walkWith(excludes map[string]struct{}, parseVendor bool) func(path string, fileInfo os.FileInfo) error { + return func(path string, f os.FileInfo) error { + if f.IsDir() { + if !parseVendor && f.Name() == "vendor" || // ignore "vendor" + f.Name() == "docs" || // exclude docs + len(f.Name()) > 1 && f.Name()[0] == '.' && f.Name() != ".." { // exclude all hidden folder + return filepath.SkipDir + } + + if excludes != nil { + if _, ok := excludes[path]; ok { + return filepath.SkipDir + } + } + } + + return nil + } +} + +// GetSwagger returns *spec.Swagger which is the root document object for the API specification. +func (parser *Parser) GetSwagger() *spec.Swagger { + return parser.swagger +} + +// addTestType just for tests. 
+func (parser *Parser) addTestType(typename string) { + typeDef := &TypeSpecDef{} + parser.packages.uniqueDefinitions[typename] = typeDef + parser.parsedSchemas[typeDef] = &Schema{ + PkgPath: "", + Name: typename, + Schema: PrimitiveSchema(OBJECT), + } + + parser.parsedSchemasV3[typeDef] = &SchemaV3{ + PkgPath: "", + Name: typename, + Schema: PrimitiveSchemaV3(OBJECT).Spec, + } +} diff --git a/vendor/github.com/swaggo/swag/v2/parserv3.go b/vendor/github.com/swaggo/swag/v2/parserv3.go new file mode 100644 index 00000000..5027d1e4 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/parserv3.go @@ -0,0 +1,1087 @@ +package swag + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "net/http" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/pkg/errors" + "github.com/sv-tools/openapi/spec" +) + +// FieldParserFactoryV3 create FieldParser. +type FieldParserFactoryV3 func(ps *Parser, file *ast.File, field *ast.Field) FieldParserV3 + +// FieldParserV3 parse struct field. +type FieldParserV3 interface { + ShouldSkip() bool + FieldName() (string, error) + FormName() string + CustomSchema() (*spec.RefOrSpec[spec.Schema], error) + ComplementSchema(schema *spec.RefOrSpec[spec.Schema]) error + IsRequired() (bool, error) +} + +// GetOpenAPI returns *spec.OpenAPI which is the root document object for the API specification. 
+func (p *Parser) GetOpenAPI() *spec.OpenAPI { + return p.openAPI +} + +var ( + serversURLPattern = regexp.MustCompile(`\{([^}]+)\}`) + serversVariablesPattern = regexp.MustCompile(`^(\w+)\s+(.+)$`) +) + +func (p *Parser) parseGeneralAPIInfoV3(comments []string) error { + previousAttribute := "" + + // parsing classic meta data model + for line := 0; line < len(comments); line++ { + commentLine := comments[line] + commentLine = strings.TrimSpace(commentLine) + if len(commentLine) == 0 { + continue + } + fields := FieldsByAnySpace(commentLine, 2) + + attribute := fields[0] + var value string + if len(fields) > 1 { + value = fields[1] + } + + switch attr := strings.ToLower(attribute); attr { + case versionAttr, titleAttr, tosAttr, licNameAttr, licURLAttr, conNameAttr, conURLAttr, conEmailAttr: + setspecInfo(p.openAPI, attr, value) + case descriptionAttr: + if previousAttribute == attribute { + p.openAPI.Info.Spec.Description += "\n" + value + + continue + } + + setspecInfo(p.openAPI, attr, value) + case descriptionMarkdownAttr: + commentInfo, err := getMarkdownForTag("api", p.markdownFileDir) + if err != nil { + return err + } + + setspecInfo(p.openAPI, attr, string(commentInfo)) + case "@host": + if len(p.openAPI.Servers) == 0 { + server := spec.NewServer() + server.Spec.URL = value + p.openAPI.Servers = append(p.openAPI.Servers, server) + } + + println("@host is deprecated use servers instead") + case "@basepath": + if len(p.openAPI.Servers) == 0 { + server := spec.NewServer() + p.openAPI.Servers = append(p.openAPI.Servers, server) + } + p.openAPI.Servers[0].Spec.URL += value + + println("@basepath is deprecated use servers instead") + + case acceptAttr: + println("acceptAttribute is deprecated, as there is no such field on top level in spec V3.1") + case produceAttr: + println("produce is deprecated, as there is no such field on top level in spec V3.1") + case "@schemes": + println("@schemes is deprecated use servers instead") + case "@tag.name": + tag := 
&spec.Extendable[spec.Tag]{ + Spec: &spec.Tag{ + Name: value, + }, + } + + p.openAPI.Tags = append(p.openAPI.Tags, tag) + case "@tag.description": + tag := p.openAPI.Tags[len(p.openAPI.Tags)-1] + tag.Spec.Description = value + case "@tag.description.markdown": + tag := p.openAPI.Tags[len(p.openAPI.Tags)-1] + + commentInfo, err := getMarkdownForTag(tag.Spec.Name, p.markdownFileDir) + if err != nil { + return err + } + + tag.Spec.Description = string(commentInfo) + case "@tag.docs.url": + tag := p.openAPI.Tags[len(p.openAPI.Tags)-1] + tag.Spec.ExternalDocs = spec.NewExternalDocs() + tag.Spec.ExternalDocs.Spec.URL = value + case "@tag.docs.description": + tag := p.openAPI.Tags[len(p.openAPI.Tags)-1] + if tag.Spec.ExternalDocs == nil { + return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) + } + + tag.Spec.ExternalDocs.Spec.Description = value + case secBasicAttr, secAPIKeyAttr, secApplicationAttr, secImplicitAttr, secPasswordAttr, secAccessCodeAttr, secBearerAuthAttr: + key, scheme, err := parseSecAttributesV3(attribute, comments, &line) + if err != nil { + return err + } + + schemeSpec := spec.NewSecuritySchemeSpec() + schemeSpec.Spec.Spec = scheme + + if p.openAPI.Components.Spec.SecuritySchemes == nil { + p.openAPI.Components.Spec.SecuritySchemes = make(map[string]*spec.RefOrSpec[spec.Extendable[spec.SecurityScheme]]) + } + + p.openAPI.Components.Spec.SecuritySchemes[key] = schemeSpec + + case "@query.collection.format": + p.collectionFormatInQuery = TransToValidCollectionFormat(value) + + case extDocsDescAttr, extDocsURLAttr: + if p.openAPI.ExternalDocs == nil { + p.openAPI.ExternalDocs = spec.NewExternalDocs() + } + + switch attr { + case extDocsDescAttr: + p.openAPI.ExternalDocs.Spec.Description = value + case extDocsURLAttr: + p.openAPI.ExternalDocs.Spec.URL = value + } + + case "@x-taggroups": + originalAttribute := strings.Split(commentLine, " ")[0] + if len(value) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + 
var valueJSON interface{} + if err := json.Unmarshal([]byte(value), &valueJSON); err != nil { + return fmt.Errorf("annotation %s need a valid json value. error: %s", originalAttribute, err.Error()) + } + + p.openAPI.Info.Extensions[originalAttribute[1:]] = valueJSON + case "@servers.url": + server := spec.NewServer() + server.Spec.URL = value + matches := serversURLPattern.FindAllStringSubmatch(value, -1) + server.Spec.Variables = make(map[string]*spec.Extendable[spec.ServerVariable]) + for _, match := range matches { + server.Spec.Variables[match[1]] = spec.NewServerVariable() + } + + p.openAPI.Servers = append(p.openAPI.Servers, server) + case "@servers.description": + server := p.openAPI.Servers[len(p.openAPI.Servers)-1] + server.Spec.Description = value + case "@servers.variables.enum": + server := p.openAPI.Servers[len(p.openAPI.Servers)-1] + matches := serversVariablesPattern.FindStringSubmatch(value) + if len(matches) > 0 { + variable, ok := server.Spec.Variables[matches[1]] + if !ok { + p.debug.Printf("Variables are not detected.") + continue + } + variable.Spec.Enum = append(variable.Spec.Enum, matches[2]) + } + case "@servers.variables.default": + server := p.openAPI.Servers[len(p.openAPI.Servers)-1] + matches := serversVariablesPattern.FindStringSubmatch(value) + if len(matches) > 0 { + variable, ok := server.Spec.Variables[matches[1]] + if !ok { + p.debug.Printf("Variables are not detected.") + continue + } + variable.Spec.Default = matches[2] + } + case "@servers.variables.description": + server := p.openAPI.Servers[len(p.openAPI.Servers)-1] + matches := serversVariablesPattern.FindStringSubmatch(value) + if len(matches) > 0 { + variable, ok := server.Spec.Variables[matches[1]] + if !ok { + p.debug.Printf("Variables are not detected.") + continue + } + variable.Spec.Default = matches[2] + } + case "@servers.variables.description.markdown": + server := p.openAPI.Servers[len(p.openAPI.Servers)-1] + matches := 
serversVariablesPattern.FindStringSubmatch(value) + if len(matches) > 0 { + variable, ok := server.Spec.Variables[matches[1]] + if !ok { + p.debug.Printf("Variables are not detected.") + continue + } + commentInfo, err := getMarkdownForTag(matches[1], p.markdownFileDir) + if err != nil { + return err + } + variable.Spec.Description = string(commentInfo) + } + default: + if strings.HasPrefix(attribute, "@x-") { + err := p.parseExtensionsV3(value, attribute) + if err != nil { + return errors.Wrap(err, "could not parse extension comment") + } + } + } + + previousAttribute = attribute + } + + return nil +} + +func (p *Parser) parseExtensionsV3(value, attribute string) error { + extensionName := attribute[1:] + + // // for each security definition + // for _, v := range p.openAPI.Components.Spec.SecuritySchemes{ + // // check if extension exists + // _, extExistsInSecurityDef := v.VendorExtensible.Extensions.GetString(extensionName) + // // if it exists in at least one, then we stop iterating + // if extExistsInSecurityDef { + // return nil + // } + // } + + if len(value) == 0 { + return fmt.Errorf("annotation %s need a value", attribute) + } + + if p.openAPI.Info.Extensions == nil { + p.openAPI.Info.Extensions = map[string]any{} + } + + var valueJSON interface{} + err := json.Unmarshal([]byte(value), &valueJSON) + if err != nil { + return fmt.Errorf("annotation %s need a valid json value. 
error: %s", attribute, err.Error()) + } + + if strings.Contains(extensionName, "logo") { + p.openAPI.Info.Extensions[extensionName] = valueJSON + return nil + } + + p.openAPI.Info.Extensions[attribute[1:]] = valueJSON + + return nil +} + +func setspecInfo(openAPI *spec.OpenAPI, attribute, value string) { + switch attribute { + case versionAttr: + openAPI.Info.Spec.Version = value + case titleAttr: + openAPI.Info.Spec.Title = value + case tosAttr: + openAPI.Info.Spec.TermsOfService = value + case descriptionAttr: + openAPI.Info.Spec.Description = value + case conNameAttr: + if openAPI.Info.Spec.Contact == nil { + openAPI.Info.Spec.Contact = spec.NewContact() + } + + openAPI.Info.Spec.Contact.Spec.Name = value + case conEmailAttr: + if openAPI.Info.Spec.Contact == nil { + openAPI.Info.Spec.Contact = spec.NewContact() + } + + openAPI.Info.Spec.Contact.Spec.Email = value + case conURLAttr: + if openAPI.Info.Spec.Contact == nil { + openAPI.Info.Spec.Contact = spec.NewContact() + } + + openAPI.Info.Spec.Contact.Spec.URL = value + case licNameAttr: + if openAPI.Info.Spec.License == nil { + openAPI.Info.Spec.License = spec.NewLicense() + } + openAPI.Info.Spec.License.Spec.Name = value + case licURLAttr: + if openAPI.Info.Spec.License == nil { + openAPI.Info.Spec.License = spec.NewLicense() + } + openAPI.Info.Spec.License.Spec.URL = value + } +} + +func parseSecAttributesV3(context string, lines []string, index *int) (string, *spec.SecurityScheme, error) { + const ( + in = "@in" + name = "@name" + descriptionAttr = "@description" + tokenURL = "@tokenurl" + authorizationURL = "@authorizationurl" + ) + + var search []string + + attribute := strings.ToLower(FieldsByAnySpace(lines[*index], 2)[0]) + switch attribute { + case secBasicAttr: + scheme := spec.SecurityScheme{ + Type: "http", + Scheme: "basic", + } + return "basic", &scheme, nil + case secAPIKeyAttr: + search = []string{in, name} + case secApplicationAttr, secPasswordAttr: + search = []string{tokenURL, in, name} + 
case secImplicitAttr: + search = []string{authorizationURL, in} + case secAccessCodeAttr: + search = []string{tokenURL, authorizationURL, in} + case secBearerAuthAttr: + scheme := spec.SecurityScheme{ + Type: "http", + Scheme: "bearer", + BearerFormat: "JWT", + } + return "bearerauth", &scheme, nil + } + + // For the first line we get the attributes in the context parameter, so we skip to the next one + *index++ + + attrMap, scopes := make(map[string]string), make(map[string]string) + extensions, description := make(map[string]interface{}), "" + + for ; *index < len(lines); *index++ { + v := strings.TrimSpace(lines[*index]) + if len(v) == 0 { + continue + } + + fields := FieldsByAnySpace(v, 2) + securityAttr := strings.ToLower(fields[0]) + var value string + if len(fields) > 1 { + value = fields[1] + } + + for _, findTerm := range search { + if securityAttr == findTerm { + attrMap[securityAttr] = value + + break + } + } + + isExists, err := isExistsScope(securityAttr) + if err != nil { + return "", nil, err + } + + if isExists { + scopes[securityAttr[len(scopeAttrPrefix):]] = v[len(securityAttr):] + } + + if strings.HasPrefix(securityAttr, "@x-") { + // Add the custom attribute without the @ + extensions[securityAttr[1:]] = value + } + + // Not mandatory field + if securityAttr == descriptionAttr { + description = value + } + + // next securityDefinitions + if strings.Index(securityAttr, "@securitydefinitions.") == 0 { + // Go back to the previous line and break + *index-- + + break + } + } + + if len(attrMap) != len(search) { + return "", nil, fmt.Errorf("%s is %v required", context, search) + } + + scheme := &spec.SecurityScheme{} + key := getSecurityDefinitionKey(lines) + + switch attribute { + case secAPIKeyAttr: + scheme.Type = "apiKey" + scheme.In = attrMap[in] + scheme.Name = attrMap[name] + case secApplicationAttr: + scheme.Type = "oauth2" + scheme.In = attrMap[in] + scheme.Flows = spec.NewOAuthFlows() + scheme.Flows.Spec.ClientCredentials = 
spec.NewOAuthFlow() + scheme.Flows.Spec.ClientCredentials.Spec.TokenURL = attrMap[tokenURL] + + scheme.Flows.Spec.ClientCredentials.Spec.Scopes = make(map[string]string) + for k, v := range scopes { + scheme.Flows.Spec.ClientCredentials.Spec.Scopes[k] = v + } + case secImplicitAttr: + scheme.Type = "oauth2" + scheme.In = attrMap[in] + scheme.Flows = spec.NewOAuthFlows() + scheme.Flows.Spec.Implicit = spec.NewOAuthFlow() + scheme.Flows.Spec.Implicit.Spec.AuthorizationURL = attrMap[authorizationURL] + scheme.Flows.Spec.Implicit.Spec.Scopes = make(map[string]string) + for k, v := range scopes { + scheme.Flows.Spec.Implicit.Spec.Scopes[k] = v + } + case secPasswordAttr: + scheme.Type = "oauth2" + scheme.In = attrMap[in] + scheme.Flows = spec.NewOAuthFlows() + scheme.Flows.Spec.Password = spec.NewOAuthFlow() + scheme.Flows.Spec.Password.Spec.TokenURL = attrMap[tokenURL] + + scheme.Flows.Spec.Password.Spec.Scopes = make(map[string]string) + for k, v := range scopes { + scheme.Flows.Spec.Password.Spec.Scopes[k] = v + } + + case secAccessCodeAttr: + scheme.Type = "oauth2" + scheme.In = attrMap[in] + scheme.Flows = spec.NewOAuthFlows() + scheme.Flows.Spec.AuthorizationCode = spec.NewOAuthFlow() + scheme.Flows.Spec.AuthorizationCode.Spec.AuthorizationURL = attrMap[authorizationURL] + scheme.Flows.Spec.AuthorizationCode.Spec.TokenURL = attrMap[tokenURL] + } + + scheme.Description = description + + if scheme.Flows != nil && scheme.Flows.Extensions == nil && len(extensions) > 0 { + scheme.Flows.Extensions = make(map[string]interface{}) + } + + for k, v := range extensions { + scheme.Flows.Extensions[k] = v + } + + return key, scheme, nil +} + +func getSecurityDefinitionKey(lines []string) string { + for _, line := range lines { + if strings.HasPrefix(strings.ToLower(line), "@securitydefinitions") { + splittedLine := strings.Split(line, " ") + return splittedLine[len(splittedLine)-1] + } + } + + return "" +} + +// ParseRouterAPIInfoV3 parses router api info for given astFile. 
+func (p *Parser) ParseRouterAPIInfoV3(fileInfo *AstFileInfo) error {
+	for _, astDescription := range fileInfo.File.Decls {
+		// Skip files that were not flagged for operation parsing.
+		if (fileInfo.ParseFlag & ParseOperations) == ParseNone {
+			continue
+		}
+
+		// Only documented function declarations can carry operation annotations.
+		astDeclaration, ok := astDescription.(*ast.FuncDecl)
+		if !ok || astDeclaration.Doc == nil || astDeclaration.Doc.List == nil {
+			continue
+		}
+
+		if p.matchTags(astDeclaration.Doc.List) &&
+			matchExtension(p.parseExtension, astDeclaration.Doc.List) {
+			// for per 'function' comment, create a new 'Operation' object
+			operation := NewOperationV3(p, SetCodeExampleFilesDirectoryV3(p.codeExampleFilesDir))
+
+			for _, comment := range astDeclaration.Doc.List {
+				err := operation.ParseComment(comment.Text, fileInfo.File)
+				if err != nil {
+					return fmt.Errorf("ParseComment error in file %s :%+v", fileInfo.Path, err)
+				}
+			}
+
+			// workaround until we replace the produce comment with a new @Success syntax
+			// We first need to setup all responses before we can set the mimetypes
+			err := operation.ProcessProduceComment()
+			if err != nil {
+				return err
+			}
+
+			err = processRouterOperationV3(p, operation)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// processRouterOperationV3 registers the parsed operation under every route
+// (path + HTTP method) it declares, creating the path item when missing.
+// A duplicate route is an error in Strict mode and a warning otherwise.
+func processRouterOperationV3(p *Parser, o *OperationV3) error {
+	for _, routeProperties := range o.RouterProperties {
+		var (
+			pathItem *spec.RefOrSpec[spec.Extendable[spec.PathItem]]
+			ok       bool
+		)
+
+		pathItem, ok = p.openAPI.Paths.Spec.Paths[routeProperties.Path]
+		if !ok {
+			pathItem = &spec.RefOrSpec[spec.Extendable[spec.PathItem]]{
+				Spec: &spec.Extendable[spec.PathItem]{
+					Spec: &spec.PathItem{},
+				},
+			}
+		}
+
+		op := refRouteMethodOpV3(pathItem.Spec.Spec, routeProperties.HTTPMethod)
+
+		// check if we already have an operation for this path and method
+		if *op != nil {
+			err := fmt.Errorf("route %s %s is declared multiple times", routeProperties.HTTPMethod, routeProperties.Path)
+			if p.Strict {
+				return err
+			}
+
+			p.debug.Printf("warning: %s\n", err)
+		}
+
+		*op = &o.Operation
+
+		p.openAPI.Paths.Spec.Paths[routeProperties.Path] = pathItem
+	}
+
+	return nil
+}
+
+// refRouteMethodOpV3 returns a pointer to the operation slot for the given
+// HTTP method on the path item, allocating the method's Extendable wrapper
+// on first use. Unsupported methods yield nil.
+func refRouteMethodOpV3(item *spec.PathItem, method string) **spec.Operation {
+	switch method {
+	case http.MethodGet:
+		if item.Get == nil {
+			item.Get = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Get.Spec
+	case http.MethodPost:
+		if item.Post == nil {
+			item.Post = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Post.Spec
+	case http.MethodDelete:
+		if item.Delete == nil {
+			item.Delete = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Delete.Spec
+	case http.MethodPut:
+		if item.Put == nil {
+			item.Put = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Put.Spec
+	case http.MethodPatch:
+		if item.Patch == nil {
+			item.Patch = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Patch.Spec
+	case http.MethodHead:
+		if item.Head == nil {
+			item.Head = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Head.Spec
+	case http.MethodOptions:
+		if item.Options == nil {
+			item.Options = &spec.Extendable[spec.Operation]{}
+		}
+		return &item.Options.Spec
+	default:
+		return nil
+	}
+}
+
+// getTypeSchemaV3 resolves a type name to an OpenAPI 3 schema, honoring
+// user overrides and, when ref is true, returning a $ref for complex types
+// registered under components/schemas.
+func (p *Parser) getTypeSchemaV3(typeName string, file *ast.File, ref bool) (*spec.RefOrSpec[spec.Schema], error) {
+	if override, ok := p.Overrides[typeName]; ok {
+		p.debug.Printf("Override detected for %s: using %s instead", typeName, override)
+		schema, err := parseObjectSchemaV3(p, override, file)
+		if err != nil {
+			return nil, err
+		}
+
+		return schema, nil
+	}
+
+	// error/any map to an unconstrained (empty) schema.
+	if IsInterfaceLike(typeName) {
+		return spec.NewSchemaSpec(), nil
+	}
+
+	if IsGolangPrimitiveType(typeName) {
+		return PrimitiveSchemaV3(TransToValidSchemeType(typeName)), nil
+	}
+
+	schemaType, err := convertFromSpecificToPrimitive(typeName)
+	if err == nil {
+		return PrimitiveSchemaV3(schemaType), nil
+	}
+
+	typeSpecDef := p.packages.FindTypeSpec(typeName, file)
+	if typeSpecDef == nil {
+		p.packages.FindTypeSpec(typeName, file) // NOTE(review): duplicate lookup looks like a leftover debugging aid — confirm before removing
+		return nil, fmt.Errorf("cannot find type definition: %s", typeName)
+	}
+
+	// Overrides keyed by the fully-qualified path; an empty override skips the field.
+	if override, ok := p.Overrides[typeSpecDef.FullPath()]; ok {
+		if override == "" {
+			p.debug.Printf("Override detected for %s: ignoring", typeSpecDef.FullPath())
+
+			return nil, ErrSkippedField
+		}
+
+		p.debug.Printf("Override detected for %s: using %s instead", typeSpecDef.FullPath(), override)
+
+		separator := strings.LastIndex(override, ".")
+		if separator == -1 {
+			// treat as a swaggertype tag
+			parts := strings.Split(override, ",")
+			return BuildCustomSchemaV3(parts)
+		}
+
+		typeSpecDef = p.packages.findTypeSpec(override[0:separator], override[separator+1:])
+	}
+
+	schema, ok := p.parsedSchemasV3[typeSpecDef]
+	if !ok {
+		var err error
+
+		schema, err = p.ParseDefinitionV3(typeSpecDef)
+		if err != nil {
+			// A recursive type still gets a $ref so the cycle terminates.
+			if err == ErrRecursiveParseStruct && ref {
+				return p.getRefTypeSchemaV3(typeSpecDef, schema), nil
+			}
+			return nil, err
+		}
+	}
+
+	if ref {
+		if IsComplexSchemaV3(schema) {
+			return p.getRefTypeSchemaV3(typeSpecDef, schema), nil
+		}
+
+		// if it is a simple schema, just return a copy
+		newSchema := *schema.Schema
+		return spec.NewRefOrSpec(nil, &newSchema), nil
+	}
+
+	return spec.NewRefOrSpec(nil, schema.Schema), nil
+}
+
+// ParseDefinitionV3 parses given type spec that corresponds to the type under
+// given name and package, and populates swagger schema definitions registry
+// with a schema for the given type
+func (p *Parser) ParseDefinitionV3(typeSpecDef *TypeSpecDef) (*SchemaV3, error) {
+	typeName := typeSpecDef.TypeName()
+	schema, found := p.parsedSchemasV3[typeSpecDef]
+	if found {
+		p.debug.Printf("Skipping '%s', already parsed.", typeName)
+
+		return schema, nil
+	}
+
+	// Recursion guard: return a placeholder object schema and a sentinel error
+	// so the caller can substitute a $ref.
+	if p.isInStructStack(typeSpecDef) {
+		p.debug.Printf("Skipping '%s', recursion detected.", typeName)
+
+		return &SchemaV3{
+				Name:    typeName,
+				PkgPath: typeSpecDef.PkgPath,
+				Schema:  PrimitiveSchemaV3(OBJECT).Spec,
+			},
+			ErrRecursiveParseStruct
+	}
+
+	p.structStack = append(p.structStack, typeSpecDef)
+
+	p.debug.Printf("Generating %s", typeName)
+
+	definition, err := p.parseTypeExprV3(typeSpecDef.File, typeSpecDef.TypeSpec.Type, false)
+	if err != nil {
+		p.debug.Printf("Error parsing type definition '%s': %s", typeName, err)
+		return nil, err
+	}
+
+	if definition.Spec.Description == "" {
+		fillDefinitionDescriptionV3(p, definition.Spec, typeSpecDef.File, typeSpecDef)
+	}
+
+	// Enum constants declared for this type become the schema's enum values,
+	// with variable names and per-value comments carried as extensions.
+	if len(typeSpecDef.Enums) > 0 {
+		var varNames []string
+		var enumComments = make(map[string]string)
+		for _, value := range typeSpecDef.Enums {
+			definition.Spec.Enum = append(definition.Spec.Enum, value.Value)
+			varNames = append(varNames, value.key)
+			if len(value.Comment) > 0 {
+				enumComments[value.key] = value.Comment
+			}
+		}
+
+		if definition.Spec.Extensions == nil {
+			definition.Spec.Extensions = make(map[string]any)
+		}
+
+		definition.Spec.Extensions[enumVarNamesExtension] = varNames
+		if len(enumComments) > 0 {
+			definition.Spec.Extensions[enumCommentsExtension] = enumComments
+		}
+	}
+
+	sch := SchemaV3{
+		Name:    typeName,
+		PkgPath: typeSpecDef.PkgPath,
+		Schema:  definition.Spec,
+	}
+	p.parsedSchemasV3[typeSpecDef] = &sch
+
+	// update an empty schema as a result of recursion
+	s2, found := p.outputSchemasV3[typeSpecDef]
+	if found {
+		p.openAPI.Components.Spec.Schemas[s2.Name] = definition
+	}
+
+	return &sch, nil
+}
+
+// fillDefinitionDescriptionV3 additionally fills fields in definition (spec.Schema)
+// TODO: If .go file contains many types, it may work for a long time
+func fillDefinitionDescriptionV3(parser *Parser, definition *spec.Schema, file *ast.File, typeSpecDef *TypeSpecDef) {
+	for _, astDeclaration := range file.Decls {
+		generalDeclaration, ok := astDeclaration.(*ast.GenDecl)
+		if !ok || generalDeclaration.Tok != token.TYPE {
+			continue
+		}
+
+		for _, astSpec := range generalDeclaration.Specs {
+			// Pointer identity: only the exact TypeSpec we are describing.
+			typeSpec, ok := astSpec.(*ast.TypeSpec)
+			if !ok || typeSpec != typeSpecDef.TypeSpec {
+				continue
+			}
+
+			var typeName string
+			if typeSpec.Name != nil {
+				typeName = typeSpec.Name.Name
+			}
+
+			text, err := parser.extractDeclarationDescription(typeName, typeSpec.Comment, generalDeclaration.Doc)
+			if err != nil {
+				parser.debug.Printf("Error extracting declaration description: %s", err)
+				continue
+			}
+
+			definition.Description = text
+		}
+	}
+}
+
+// parseTypeExprV3 parses given type expression that corresponds to the type under
+// given name and package, and returns swagger schema for it.
+func (p *Parser) parseTypeExprV3(file *ast.File, typeExpr ast.Expr, ref bool) (*spec.RefOrSpec[spec.Schema], error) {
+	const errMessage = "parse type expression v3"
+
+	switch expr := typeExpr.(type) {
+	// type Foo interface{}
+	case *ast.InterfaceType:
+		return spec.NewSchemaSpec(), nil
+
+	// type Foo struct {...}
+	case *ast.StructType:
+		return p.parseStructV3(file, expr.Fields)
+
+	// type Foo Baz
+	case *ast.Ident:
+		result, err := p.getTypeSchemaV3(expr.Name, file, ref)
+		if err != nil {
+			return nil, errors.Wrap(err, errMessage)
+		}
+
+		return result, nil
+	// type Foo *Baz
+	case *ast.StarExpr:
+		return p.parseTypeExprV3(file, expr.X, ref)
+
+	// type Foo pkg.Bar
+	case *ast.SelectorExpr:
+		if xIdent, ok := expr.X.(*ast.Ident); ok {
+			result, err := p.getTypeSchemaV3(fullTypeName(xIdent.Name, expr.Sel.Name), file, ref)
+			if err != nil {
+				return nil, errors.Wrap(err, errMessage)
+			}
+
+			return result, nil
+		}
+	// type Foo []Baz
+	case *ast.ArrayType:
+		itemSchema, err := p.parseTypeExprV3(file, expr.Elt, true)
+		if err != nil {
+			return nil, err
+		}
+
+		if itemSchema == nil {
+			schema := &spec.Schema{}
+			schema.Type = spec.NewSingleOrArray(ARRAY)
+			schema.Items = spec.NewBoolOrSchema(false, spec.NewSchemaSpec())
+			p.debug.Printf("Creating array with empty item schema %v", expr.Elt)
+
+			return spec.NewRefOrSpec(nil, schema), nil
+		}
+
+		result := &spec.Schema{}
+		result.Type = spec.NewSingleOrArray(ARRAY)
+		result.Items = spec.NewBoolOrSchema(false, itemSchema)
+
+		return spec.NewRefOrSpec(nil, result), nil
+	// type Foo map[string]Bar
+	case *ast.MapType:
+		// map[string]interface{} -> free-form object.
+		if _, ok := expr.Value.(*ast.InterfaceType); ok {
+			result := &spec.Schema{}
+			result.AdditionalProperties = spec.NewBoolOrSchema(false, spec.NewSchemaSpec())
+			result.Type = spec.NewSingleOrArray(OBJECT)
+
+			return spec.NewRefOrSpec(nil, result), nil
+		}
+
+		schema, err := p.parseTypeExprV3(file, expr.Value, true)
+		if err != nil {
+			return nil, err
+		}
+
+		result := &spec.Schema{}
+		result.AdditionalProperties = spec.NewBoolOrSchema(false, schema)
+		result.Type = spec.NewSingleOrArray(OBJECT)
+
+		return spec.NewRefOrSpec(nil, result), nil
+	case *ast.FuncType:
+		return nil, ErrFuncTypeField
+		// ...
+	}
+
+	// Generics and anything not handled above.
+	return p.parseGenericTypeExprV3(file, typeExpr)
+}
+
+// parseStructV3 builds an object schema from a struct's field list,
+// merging properties and required names contributed by each field
+// (including embedded/anonymous fields).
+func (p *Parser) parseStructV3(file *ast.File, fields *ast.FieldList) (*spec.RefOrSpec[spec.Schema], error) {
+	required, properties := make([]string, 0), make(map[string]*spec.RefOrSpec[spec.Schema])
+
+	for _, field := range fields.List {
+		fieldProps, requiredFromAnon, err := p.parseStructFieldV3(file, field)
+		if err != nil {
+			// Function-typed and explicitly skipped fields are silently omitted.
+			if err == ErrFuncTypeField || err == ErrSkippedField {
+				continue
+			}
+
+			return nil, err
+		}
+
+		if len(fieldProps) == 0 {
+			continue
+		}
+
+		required = append(required, requiredFromAnon...)
+
+		for k, v := range fieldProps {
+			properties[k] = v
+		}
+	}
+
+	// Deterministic output regardless of field iteration order.
+	sort.Strings(required)
+
+	result := spec.NewSchemaSpec()
+	result.Spec.Type = spec.NewSingleOrArray(OBJECT)
+	result.Spec.Properties = properties
+	result.Spec.Required = required
+
+	return result, nil
+}
+
+// parseStructFieldV3 produces the schema properties contributed by a single
+// struct field. Anonymous (embedded) struct fields are flattened into the
+// parent; the second return value carries their required-property names.
+func (p *Parser) parseStructFieldV3(file *ast.File, field *ast.Field) (map[string]*spec.RefOrSpec[spec.Schema], []string, error) {
+	// swaggerignore:"true" drops the field entirely.
+	if field.Tag != nil {
+		skip, ok := reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")).Lookup("swaggerignore")
+		if ok && strings.EqualFold(skip, "true") {
+			return nil, nil, nil
+		}
+	}
+
+	ps := p.fieldParserFactoryV3(p, file, field)
+
+	if ps.ShouldSkip() {
+		return nil, nil, nil
+	}
+
+	fieldName, err := ps.FieldName()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// An empty field name means an embedded (anonymous) field.
+	if fieldName == "" {
+		typeName, err := getFieldType(file, field.Type, nil)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		schema, err := p.getTypeSchemaV3(typeName, file, false)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if len(schema.Spec.Type) > 0 && schema.Spec.Type[0] == OBJECT {
+			if len(schema.Spec.Properties) == 0 {
+				return nil, nil, nil
+			}
+
+			properties := make(map[string]*spec.RefOrSpec[spec.Schema])
+			for k, v := range schema.Spec.Properties {
+				properties[k] = v
+			}
+
+			return properties, schema.Spec.Required, nil
+		}
+		// for alias type of non-struct types ,such as array,map, etc. ignore field tag.
+		return map[string]*spec.RefOrSpec[spec.Schema]{
+			typeName: schema,
+		}, nil, nil
+	}
+
+	schema, err := ps.CustomSchema()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if schema == nil {
+		typeName, err := getFieldType(file, field.Type, nil)
+		if err == nil {
+			// named type
+			schema, err = p.getTypeSchemaV3(typeName, file, true)
+			if err != nil {
+				return nil, nil, err
+			}
+		} else {
+			// unnamed type
+			parsedSchema, err := p.parseTypeExprV3(file, field.Type, false)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			schema = parsedSchema
+		}
+	}
+
+	// Apply tag-derived attributes (format, example, validation, ...).
+	err = ps.ComplementSchema(schema)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var tagRequired []string
+
+	required, err := ps.IsRequired()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if required {
+		tagRequired = append(tagRequired, fieldName)
+	}
+
+	if formName := ps.FormName(); len(formName) > 0 {
+		if schema.Spec.Extensions == nil {
+			schema.Spec.Extensions = make(map[string]any)
+		}
+		schema.Spec.Extensions[formTag] = formName
+	}
+
+	return map[string]*spec.RefOrSpec[spec.Schema]{fieldName: schema}, tagRequired, nil
+}
+
+// getRefTypeSchemaV3 registers the schema under components/schemas on first
+// use and returns a $ref pointing at it.
+func (p *Parser) getRefTypeSchemaV3(typeSpecDef *TypeSpecDef, schema *SchemaV3) *spec.RefOrSpec[spec.Schema] {
+	_, ok := p.outputSchemasV3[typeSpecDef]
+	if !ok {
+		if p.openAPI.Components.Spec.Schemas == nil {
+			p.openAPI.Components.Spec.Schemas = make(map[string]*spec.RefOrSpec[spec.Schema])
+		}
+
+		// Placeholder first so recursive references resolve; replaced below
+		// when the concrete schema is already available.
+		p.openAPI.Components.Spec.Schemas[schema.Name] = spec.NewSchemaSpec()
+
+		if schema.Schema != nil {
+			p.openAPI.Components.Spec.Schemas[schema.Name] = spec.NewRefOrSpec(nil, schema.Schema)
+		}
+
+		p.outputSchemasV3[typeSpecDef] = schema
+	}
+
+	refSchema := RefSchemaV3(schema.Name)
+
+	return refSchema
+}
+
+// GetSchemaTypePathV3 gets the type path of a schema.
+func (p *Parser) GetSchemaTypePathV3(schema *spec.RefOrSpec[spec.Schema], depth int) []string { + if schema == nil || depth == 0 { + return nil + } + + name := "" + if schema.Ref != nil { + name = schema.Ref.Ref + } + + if name != "" { + if pos := strings.LastIndexByte(name, '/'); pos >= 0 { + name = name[pos+1:] + if schema, ok := p.openAPI.Components.Spec.Schemas[name]; ok { + return p.GetSchemaTypePathV3(schema, depth) + } + } + + return nil + } + + if schema.Spec != nil && len(schema.Spec.Type) > 0 { + switch schema.Spec.Type[0] { + case ARRAY: + depth-- + + s := []string{schema.Spec.Type[0]} + + return append(s, p.GetSchemaTypePathV3(schema.Spec.Items.Schema, depth)...) + case OBJECT: + if schema.Spec.AdditionalProperties != nil && schema.Spec.AdditionalProperties.Schema != nil { + // for map + depth-- + + s := []string{schema.Spec.Type[0]} + + return append(s, p.GetSchemaTypePathV3(schema.Spec.AdditionalProperties.Schema, depth)...) + } + } + + return []string{schema.Spec.Type[0]} + } + + println("found schema with no Type, returning any") + return []string{ANY} +} + +func (p *Parser) getSchemaByRef(ref *spec.Ref) *spec.Schema { + searchString := strings.ReplaceAll(ref.Ref, "#/components/schemas/", "") + return p.openAPI.Components.Spec.Schemas[searchString].Spec +} diff --git a/vendor/github.com/swaggo/swag/v2/schema.go b/vendor/github.com/swaggo/swag/v2/schema.go new file mode 100644 index 00000000..c78a4352 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/schema.go @@ -0,0 +1,294 @@ +package swag + +import ( + "errors" + "fmt" + + "github.com/go-openapi/spec" +) + +const ( + // ARRAY represent a array value. + ARRAY = "array" + // OBJECT represent a object value. + OBJECT = "object" + // PRIMITIVE represent a primitive value. + PRIMITIVE = "primitive" + // BOOLEAN represent a boolean value. + BOOLEAN = "boolean" + // INTEGER represent a integer value. + INTEGER = "integer" + // NUMBER represent a number value. 
+ NUMBER = "number" + // STRING represent a string value. + STRING = "string" + // FUNC represent a function value. + FUNC = "func" + // ERROR represent a error value. + ERROR = "error" + // INTERFACE represent a interface value. + INTERFACE = "interface{}" + // ANY represent a any value. + ANY = "any" + // NIL represent a empty value. + NIL = "nil" + + // IgnoreNameOverridePrefix Prepend to model to avoid renaming based on comment. + IgnoreNameOverridePrefix = '$' +) + +// CheckSchemaType checks if typeName is not a name of primitive type. +func CheckSchemaType(typeName string) error { + if !IsPrimitiveType(typeName) { + return fmt.Errorf("%s is not basic types", typeName) + } + + return nil +} + +// IsSimplePrimitiveType determine whether the type name is a simple primitive type. +func IsSimplePrimitiveType(typeName string) bool { + switch typeName { + case STRING, NUMBER, INTEGER, BOOLEAN: + return true + } + + return false +} + +// IsPrimitiveType determine whether the type name is a primitive type. +func IsPrimitiveType(typeName string) bool { + switch typeName { + case STRING, NUMBER, INTEGER, BOOLEAN, ARRAY, OBJECT, FUNC: + return true + } + + return false +} + +// IsInterfaceLike determines whether the swagger type name is an go named interface type like error type. +func IsInterfaceLike(typeName string) bool { + return typeName == ERROR || typeName == ANY +} + +// IsNumericType determines whether the swagger type name is a numeric type. +func IsNumericType(typeName string) bool { + return typeName == INTEGER || typeName == NUMBER +} + +// TransToValidSchemeType indicates type will transfer golang basic type to swagger supported type. 
+func TransToValidSchemeType(typeName string) string {
+	switch typeName {
+	// Every Go integer flavor maps onto the single swagger "integer" type.
+	case "uint", "int", "uint8", "int8", "uint16", "int16", "byte",
+		"uint32", "int32", "rune",
+		"uint64", "int64":
+		return INTEGER
+	case "float32", "float64":
+		return NUMBER
+	case "bool":
+		return BOOLEAN
+	case "string":
+		return STRING
+	default:
+		// Non-basic types pass through untouched.
+		return typeName
+	}
+}
+
+// IsGolangPrimitiveType reports whether the type name is a golang primitive type.
+func IsGolangPrimitiveType(typeName string) bool {
+	switch typeName {
+	case "uint", "int", "uint8", "int8", "uint16", "int16", "byte",
+		"uint32", "int32", "rune",
+		"uint64", "int64",
+		"float32", "float64",
+		"bool", "string":
+		return true
+	default:
+		return false
+	}
+}
+
+// TransToValidCollectionFormat returns the collection format when it is one
+// of the supported values, and an empty string otherwise.
+func TransToValidCollectionFormat(format string) string {
+	switch format {
+	case "csv", "multi", "pipes", "tsv", "ssv":
+		return format
+	default:
+		return ""
+	}
+}
+
+// ignoreNameOverride reports whether the model name carries the
+// IgnoreNameOverridePrefix marker.
+func ignoreNameOverride(name string) bool {
+	return len(name) != 0 && name[0] == IgnoreNameOverridePrefix
+}
+
+// IsComplexSchema reports whether a schema is complex and should be a ref schema.
+func IsComplexSchema(schema *spec.Schema) bool {
+	// an enum type should be complex
+	if len(schema.Enum) > 0 {
+		return true
+	}
+
+	// a deep array type is complex, how to determine deep? here more than 2, for example: [][]object,[][][]int
+	if len(schema.Type) > 2 {
+		return true
+	}
+
+	// Object included, such as Object or []Object
+	for _, typ := range schema.Type {
+		if typ == OBJECT {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsRefSchema reports whether a schema is a reference schema.
+func IsRefSchema(schema *spec.Schema) bool {
+	return schema.Ref.Ref.GetURL() != nil
+}
+
+// RefSchema builds a reference schema.
+func RefSchema(refType string) *spec.Schema {
+	return spec.RefSchema("#/definitions/" + refType)
+}
+
+// PrimitiveSchema builds a primitive schema.
+func PrimitiveSchema(refType string) *spec.Schema {
+	return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{refType}}}
+}
+
+// BuildCustomSchema builds a custom schema specified by the swaggertype tag.
+// types is the comma-split tag value, e.g. ["array", "string"].
+func BuildCustomSchema(types []string) (*spec.Schema, error) {
+	if len(types) == 0 {
+		return nil, nil
+	}
+
+	switch types[0] {
+	case PRIMITIVE:
+		if len(types) == 1 {
+			return nil, errors.New("need primitive type after primitive")
+		}
+
+		return BuildCustomSchema(types[1:])
+	case ARRAY:
+		if len(types) == 1 {
+			return nil, errors.New("need array item type after array")
+		}
+
+		schema, err := BuildCustomSchema(types[1:])
+		if err != nil {
+			return nil, err
+		}
+
+		return spec.ArrayProperty(schema), nil
+	case OBJECT:
+		if len(types) == 1 {
+			return PrimitiveSchema(types[0]), nil
+		}
+
+		// object with a value type -> map schema.
+		schema, err := BuildCustomSchema(types[1:])
+		if err != nil {
+			return nil, err
+		}
+
+		return spec.MapProperty(schema), nil
+	default:
+		err := CheckSchemaType(types[0])
+		if err != nil {
+			return nil, err
+		}
+
+		return PrimitiveSchema(types[0]), nil
+	}
+}
+
+// MergeSchema copies every non-zero field of src into dst and returns dst.
+// Zero-valued fields of src leave the corresponding dst field untouched.
+func MergeSchema(dst *spec.Schema, src *spec.Schema) *spec.Schema {
+	if len(src.Type) > 0 {
+		dst.Type = src.Type
+	}
+	if len(src.Properties) > 0 {
+		dst.Properties = src.Properties
+	}
+	if src.Items != nil {
+		dst.Items = src.Items
+	}
+	if src.AdditionalProperties != nil {
+		dst.AdditionalProperties = src.AdditionalProperties
+	}
+	if len(src.Description) > 0 {
+		dst.Description = src.Description
+	}
+	if src.Nullable {
+		dst.Nullable = src.Nullable
+	}
+	if len(src.Format) > 0 {
+		dst.Format = src.Format
+	}
+	if src.Default != nil {
+		dst.Default = src.Default
+	}
+	if src.Example != nil {
+		dst.Example = src.Example
+	}
+	if len(src.Extensions) > 0 {
+		dst.Extensions = src.Extensions
+	}
+	if src.Maximum != nil {
+		dst.Maximum = src.Maximum
+	}
+	if src.Minimum != nil {
+		dst.Minimum = src.Minimum
+	}
+	if src.ExclusiveMaximum {
+		dst.ExclusiveMaximum = src.ExclusiveMaximum
+	}
+	if src.ExclusiveMinimum {
+		dst.ExclusiveMinimum = src.ExclusiveMinimum
+	}
+	if src.MaxLength != nil {
+		dst.MaxLength = src.MaxLength
+	}
+	if src.MinLength != nil {
+		dst.MinLength = src.MinLength
+	}
+	if len(src.Pattern) > 0 {
+		dst.Pattern = src.Pattern
+	}
+	if src.MaxItems != nil {
+		dst.MaxItems = src.MaxItems
+	}
+	if src.MinItems != nil {
+		dst.MinItems = src.MinItems
+	}
+	if src.UniqueItems {
+		dst.UniqueItems = src.UniqueItems
+	}
+	if src.MultipleOf != nil {
+		dst.MultipleOf = src.MultipleOf
+	}
+	if len(src.Enum) > 0 {
+		dst.Enum = src.Enum
+	}
+	// NOTE: the original assigned Extensions a second time here; the duplicate
+	// assignment was redundant (same source, same destination) and is removed.
+	if len(src.ExtraProps) > 0 {
+		dst.ExtraProps = src.ExtraProps
+	}
+	return dst
+}
diff --git a/vendor/github.com/swaggo/swag/v2/schemav3.go b/vendor/github.com/swaggo/swag/v2/schemav3.go
new file mode 100644
index 00000000..f00a2df9
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/v2/schemav3.go
@@ -0,0 +1,141 @@
+package swag
+
+import (
+	"errors"
+
+	"github.com/sv-tools/openapi/spec"
+)
+
+// PrimitiveSchemaV3 builds a primitive schema.
+func PrimitiveSchemaV3(refType string) *spec.RefOrSpec[spec.Schema] {
+	result := spec.NewSchemaSpec()
+	result.Spec.Type = spec.SingleOrArray[string]{refType}
+
+	return result
+}
+
+// IsComplexSchemaV3 reports whether a schema is complex and should be a ref schema.
+func IsComplexSchemaV3(schema *SchemaV3) bool {
+	// an enum type should be complex
+	if len(schema.Enum) > 0 {
+		return true
+	}
+
+	// a deep array type is complex, how to determine deep? here more than 2, for example: [][]object,[][][]int
+	if len(schema.Type) > 2 {
+		return true
+	}
+
+	// Object included, such as Object or []Object
+	for _, st := range schema.Type {
+		if st == OBJECT {
+			return true
+		}
+	}
+	return false
+}
+
+// RefSchemaV3 builds a reference schema.
+func RefSchemaV3(refType string) *spec.RefOrSpec[spec.Schema] { + return spec.NewRefOrSpec[spec.Schema](spec.NewRef("#/components/schemas/"+refType), nil) +} + +// BuildCustomSchemaV3 build custom schema specified by tag swaggertype. +func BuildCustomSchemaV3(types []string) (*spec.RefOrSpec[spec.Schema], error) { + if len(types) == 0 { + return nil, nil + } + + switch types[0] { + case PRIMITIVE: + if len(types) == 1 { + return nil, errors.New("need primitive type after primitive") + } + + return BuildCustomSchemaV3(types[1:]) + case ARRAY: + if len(types) == 1 { + return nil, errors.New("need array item type after array") + } + + schema, err := BuildCustomSchemaV3(types[1:]) + if err != nil { + return nil, err + } + + // TODO: check if this is correct + result := spec.NewSchemaSpec() + result.Spec.Type = []string{"array"} + result.Spec.AdditionalProperties = spec.NewBoolOrSchema(true, schema) + + return result, nil + case OBJECT: + if len(types) == 1 { + return PrimitiveSchemaV3(types[0]), nil + } + + schema, err := BuildCustomSchemaV3(types[1:]) + if err != nil { + return nil, err + } + + result := spec.NewSchemaSpec() + result.Spec.AdditionalProperties = spec.NewBoolOrSchema(true, schema) + result.Spec.Type = spec.NewSingleOrArray("object") + + return result, nil + default: + err := CheckSchemaType(types[0]) + if err != nil { + return nil, err + } + + return PrimitiveSchemaV3(types[0]), nil + } +} + +// TransToValidCollectionFormatV3 determine valid collection format. 
+func TransToValidCollectionFormatV3(format, in string) string { + switch in { + case "query": + switch format { + case "form", "spaceDelimited", "pipeDelimited", "deepObject": + return format + case "ssv": + return "spaceDelimited" + case "pipes": + return "pipe" + case "multi": + return "form" + case "csv": + return "form" + default: + return "" + } + case "path": + switch format { + case "matrix", "label", "simple": + return format + case "csv": + return "simple" + default: + return "" + } + case "header": + switch format { + case "form", "simple": + return format + case "csv": + return "simple" + default: + return "" + } + case "cookie": + switch format { + case "form": + return format + } + } + + return "" +} diff --git a/vendor/github.com/swaggo/swag/v2/spec.go b/vendor/github.com/swaggo/swag/v2/spec.go new file mode 100644 index 00000000..c18a365b --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/spec.go @@ -0,0 +1,64 @@ +package swag + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" +) + +// Spec holds exported Swagger Info so clients can modify it. +type Spec struct { + Version string + Host string + BasePath string + Schemes []string + Title string + Description string + InfoInstanceName string + SwaggerTemplate string + LeftDelim string + RightDelim string +} + +// ReadDoc parses SwaggerTemplate into swagger document. 
+func (i *Spec) ReadDoc() string { + i.Description = strings.ReplaceAll(i.Description, "\n", "\\n") + + tpl := template.New("swagger_info").Funcs(template.FuncMap{ + "marshal": func(v interface{}) string { + a, _ := json.Marshal(v) + + return string(a) + }, + "escape": func(v interface{}) string { + // escape tabs + var str = strings.ReplaceAll(v.(string), "\t", "\\t") + // replace " with \", and if that results in \\", replace that with \\\" + str = strings.ReplaceAll(str, "\"", "\\\"") + + return strings.ReplaceAll(str, "\\\\\"", "\\\\\\\"") + }, + }) + + if i.LeftDelim != "" && i.RightDelim != "" { + tpl = tpl.Delims(i.LeftDelim, i.RightDelim) + } + + parsed, err := tpl.Parse(i.SwaggerTemplate) + if err != nil { + return i.SwaggerTemplate + } + + var doc bytes.Buffer + if err = parsed.Execute(&doc, i); err != nil { + return i.SwaggerTemplate + } + + return doc.String() +} + +// InstanceName returns Spec instance name. +func (i *Spec) InstanceName() string { + return i.InfoInstanceName +} diff --git a/vendor/github.com/swaggo/swag/v2/swagger.go b/vendor/github.com/swaggo/swag/v2/swagger.go new file mode 100644 index 00000000..74c162c2 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/swagger.go @@ -0,0 +1,72 @@ +package swag + +import ( + "errors" + "fmt" + "sync" +) + +// Name is a unique name be used to register swag instance. +const Name = "swagger" + +var ( + swaggerMu sync.RWMutex + swags map[string]Swagger +) + +// Swagger is an interface to read swagger document. +type Swagger interface { + ReadDoc() string +} + +// Register registers swagger for given name. +func Register(name string, swagger Swagger) { + swaggerMu.Lock() + defer swaggerMu.Unlock() + + if swagger == nil { + panic("swagger is nil") + } + + if swags == nil { + swags = make(map[string]Swagger) + } + + if _, ok := swags[name]; ok { + panic("Register called twice for swag: " + name) + } + + swags[name] = swagger +} + +// GetSwagger returns the swagger instance for given name. 
+// If not found, returns nil. +func GetSwagger(name string) Swagger { + swaggerMu.RLock() + defer swaggerMu.RUnlock() + + return swags[name] +} + +// ReadDoc reads swagger document. An optional name parameter can be passed to read a specific document. +// The default name is "swagger". +func ReadDoc(optionalName ...string) (string, error) { + swaggerMu.RLock() + defer swaggerMu.RUnlock() + + if swags == nil { + return "", errors.New("no swag has yet been registered") + } + + name := Name + if len(optionalName) != 0 && optionalName[0] != "" { + name = optionalName[0] + } + + swag, ok := swags[name] + if !ok { + return "", fmt.Errorf("no swag named \"%s\" was registered", name) + } + + return swag.ReadDoc(), nil +} diff --git a/vendor/github.com/swaggo/swag/v2/types.go b/vendor/github.com/swaggo/swag/v2/types.go new file mode 100644 index 00000000..5f3031e0 --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/types.go @@ -0,0 +1,123 @@ +package swag + +import ( + "go/ast" + "go/token" + "regexp" + "strings" + + "github.com/go-openapi/spec" +) + +// Schema parsed schema. +type Schema struct { + *spec.Schema // + PkgPath string // package import path used to rename Name of a definition int case of conflict + Name string // Name in definitions +} + +// TypeSpecDef the whole information of a typeSpec. +type TypeSpecDef struct { + // ast file where TypeSpec is + File *ast.File + + // the TypeSpec of this type definition + TypeSpec *ast.TypeSpec + + Enums []EnumValue + + // path of package starting from under ${GOPATH}/src or from module path in go.mod + PkgPath string + ParentSpec ast.Decl + + SchemaName string + + NotUnique bool +} + +// Name the name of the typeSpec. +func (t *TypeSpecDef) Name() string { + if t.TypeSpec != nil && t.TypeSpec.Name != nil { + return t.TypeSpec.Name.Name + } + + return "" +} + +// TypeName the type name of the typeSpec. 
+func (t *TypeSpecDef) TypeName() string { + if ignoreNameOverride(t.TypeSpec.Name.Name) { + return t.TypeSpec.Name.Name[1:] + } + + var names []string + if t.NotUnique { + pkgPath := strings.Map(func(r rune) rune { + if r == '\\' || r == '/' || r == '.' { + return '_' + } + return r + }, t.PkgPath) + names = append(names, pkgPath) + } else if t.File != nil { + names = append(names, t.File.Name.Name) + } + if parentFun, ok := (t.ParentSpec).(*ast.FuncDecl); ok && parentFun != nil { + names = append(names, parentFun.Name.Name) + } + names = append(names, t.TypeSpec.Name.Name) + return fullTypeName(names...) +} + +// FullPath return the full path of the typeSpec. +func (t *TypeSpecDef) FullPath() string { + return t.PkgPath + "." + t.Name() +} + +const regexCaseInsensitive = "(?i)" + +var reTypeName = regexp.MustCompile(regexCaseInsensitive + `^@name\s+(\S+)`) + +func (t *TypeSpecDef) Alias() string { + if t.TypeSpec.Comment == nil { + return "" + } + + // get alias from comment '// @name ' + for _, comment := range t.TypeSpec.Comment.List { + trimmedComment := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + texts := reTypeName.FindStringSubmatch(trimmedComment) + if len(texts) > 1 { + return texts[1] + } + } + + return "" +} + +func (t *TypeSpecDef) SetSchemaName() { + if alias := t.Alias(); alias != "" { + t.SchemaName = alias + return + } + + t.SchemaName = t.TypeName() +} + +// AstFileInfo information of an ast.File. 
+type AstFileInfo struct { + //FileSet the FileSet object which is used to parse this go source file + FileSet *token.FileSet + + // File ast.File + File *ast.File + + // Path the path of the ast.File + Path string + + // PackagePath package import path of the ast.File + PackagePath string + + // ParseFlag determine what to parse + ParseFlag ParseFlag +} diff --git a/vendor/github.com/swaggo/swag/v2/typesv3.go b/vendor/github.com/swaggo/swag/v2/typesv3.go new file mode 100644 index 00000000..a7143bcc --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/typesv3.go @@ -0,0 +1,10 @@ +package swag + +import "github.com/sv-tools/openapi/spec" + +// SchemaV3 parsed schema. +type SchemaV3 struct { + *spec.Schema // + PkgPath string // package import path used to rename Name of a definition int case of conflict + Name string // Name in definitions +} diff --git a/vendor/github.com/swaggo/swag/v2/utils.go b/vendor/github.com/swaggo/swag/v2/utils.go new file mode 100644 index 00000000..8272c4cc --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/utils.go @@ -0,0 +1,81 @@ +package swag + +import ( + "reflect" + "unicode" + "unicode/utf8" +) + +// FieldsFunc split a string s by a func splitter into max n parts +func FieldsFunc(s string, f func(rune2 rune) bool, n int) []string { + // A span is used to record a slice of s of the form s[start:end]. + // The start index is inclusive and the end index is exclusive. + type span struct { + start int + end int + } + spans := make([]span, 0, 32) + + // Find the field start and end indices. + // Doing this in a separate pass (rather than slicing the string s + // and collecting the result substrings right away) is significantly + // more efficient, possibly due to cache effects. + start := -1 // valid span start if >= 0 + for end, rune := range s { + if f(rune) { + if start >= 0 { + spans = append(spans, span{start, end}) + // Set start to a negative value. 
+ // Note: using -1 here consistently and reproducibly + // slows down this code by a several percent on amd64. + start = ^start + } + } else { + if start < 0 { + start = end + if n > 0 && len(spans)+1 >= n { + break + } + } + } + } + + // Last field might end at EOF. + if start >= 0 { + spans = append(spans, span{start, len(s)}) + } + + // Create strings from recorded field indices. + a := make([]string, len(spans)) + for i, span := range spans { + a[i] = s[span.start:span.end] + } + return a +} + +// FieldsByAnySpace split a string s by any space character into max n parts +func FieldsByAnySpace(s string, n int) []string { + return FieldsFunc(s, unicode.IsSpace, n) +} + +// AppendUtf8Rune appends the UTF-8 encoding of r to the end of p and +// returns the extended buffer. If the rune is out of range, +// it appends the encoding of RuneError. +func AppendUtf8Rune(p []byte, r rune) []byte { + return utf8.AppendRune(p, r) +} + +// CanIntegerValue a wrapper of reflect.Value +type CanIntegerValue struct { + reflect.Value +} + +// CanInt reports whether Uint can be used without panicking. +func (v CanIntegerValue) CanInt() bool { + return v.Value.CanInt() +} + +// CanUint reports whether Uint can be used without panicking. +func (v CanIntegerValue) CanUint() bool { + return v.Value.CanUint() +} diff --git a/vendor/github.com/swaggo/swag/v2/version.go b/vendor/github.com/swaggo/swag/v2/version.go new file mode 100644 index 00000000..d91efdbb --- /dev/null +++ b/vendor/github.com/swaggo/swag/v2/version.go @@ -0,0 +1,4 @@ +package swag + +// Version of swag. +const Version = "v2.0.0" diff --git a/vendor/github.com/swaggo/swag/version.go b/vendor/github.com/swaggo/swag/version.go index ff2810e6..cf52320f 100644 --- a/vendor/github.com/swaggo/swag/version.go +++ b/vendor/github.com/swaggo/swag/version.go @@ -1,4 +1,4 @@ package swag // Version of swag. 
-const Version = "v1.16.3" +const Version = "v1.16.4" diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 00000000..571116cc --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 00000000..2e337a0e --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,15 @@ +/bin +.DS_Store +/vendor +cover.html +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof + +# Output of fossa analyzer +/fossa diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 00000000..6f87f33f --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,127 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.11.0] - 2023-05-02 +### Fixed +- Fix initialization of `Value` wrappers. 
+ +### Added +- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print +underlying values of pointers. + +[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0 + +## [1.10.0] - 2022-08-11 +### Added +- Add `atomic.Float32` type for atomic operations on `float32`. +- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`, + and `atomic.Value`. +- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any + type. This is present only for Go 1.18 or higher, and is a drop-in for + replacement for the standard library's `sync/atomic.Pointer` type. + +### Changed +- Deprecate `CAS` methods on all types in favor of corresponding + `CompareAndSwap` methods. + +Thanks to @eNV25 and @icpd for their contributions to this release. + +[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0 + +## [1.9.0] - 2021-07-15 +### Added +- Add `Float64.Swap` to match int atomic operations. +- Add `atomic.Time` type for atomic operations on `time.Time` values. + +[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 + +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. +- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. 
+ +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. + +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. + +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 + +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 + +## [1.0.0] - 2016-07-18 + +- Initial release. 
+ +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 00000000..8765c9fb --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 00000000..46c945b3 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,79 @@ +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. 
+COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint + +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + +.PHONY: lint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + git status; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." 
) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 00000000..96b47a1f --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,63 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. If you are using Go modules, this package will fail to +compile with the legacy import path path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. + +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` + +## Usage + +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). 
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 00000000..f0a2ddd1 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,88 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(val bool) *Bool { + x := &Bool{} + if val != _zeroBool { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(val bool) { + x.v.Store(boolToInt(val)) +} + +// CAS is an atomic compare-and-swap for bool values. +// +// Deprecated: Use CompareAndSwap. +func (x *Bool) CAS(old, new bool) (swapped bool) { + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for bool values. +func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { + return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(val bool) (old bool) { + return truthy(x.v.Swap(boolToInt(val))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 00000000..a2e60e98 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() (old bool) { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 00000000..ae7390ee --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 00000000..7c23868f --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,89 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(val time.Duration) *Duration { + x := &Duration{} + if val != _zeroDuration { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(val time.Duration) { + x.v.Store(int64(val)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +// +// Deprecated: Use CompareAndSwap. 
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) { + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { + return x.v.CompareAndSwap(int64(old), int64(new)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(val time.Duration) (old time.Duration) { + return time.Duration(x.v.Swap(int64(val))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 00000000..4c18b0a9 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(delta time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(delta))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(delta time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(delta))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 00000000..b7e3f129 --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,72 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(val error) *Error { + x := &Error{} + if val != _zeroError { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(val error) { + x.v.Store(packError(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for error values. +func (x *Error) CompareAndSwap(old, new error) (swapped bool) { + if x.v.CompareAndSwap(packError(old), packError(new)) { + return true + } + + if old == _zeroError { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packError(new)) + } + + return false +} + +// Swap atomically stores the given error and returns the old +// value. +func (x *Error) Swap(val error) (old error) { + return unpackError(x.v.Swap(packError(val))) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 00000000..d31fb633 --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go new file mode 100644 index 00000000..62c36334 --- /dev/null +++ b/vendor/go.uber.org/atomic/float32.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float32 is an atomic type-safe wrapper for float32 values. +type Float32 struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroFloat32 float32 + +// NewFloat32 creates a new Float32. +func NewFloat32(val float32) *Float32 { + x := &Float32{} + if val != _zeroFloat32 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float32. +func (x *Float32) Load() float32 { + return math.Float32frombits(x.v.Load()) +} + +// Store atomically stores the passed float32. +func (x *Float32) Store(val float32) { + x.v.Store(math.Float32bits(val)) +} + +// Swap atomically stores the given float32 and returns the old +// value. +func (x *Float32) Swap(val float32) (old float32) { + return math.Float32frombits(x.v.Swap(math.Float32bits(val))) +} + +// MarshalJSON encodes the wrapped float32 into JSON. 
+func (x *Float32) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float32 from JSON. +func (x *Float32) UnmarshalJSON(b []byte) error { + var v float32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go new file mode 100644 index 00000000..b0cd8d9c --- /dev/null +++ b/vendor/go.uber.org/atomic/float32_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go + +// Add atomically adds to the wrapped float32 and returns the new value. 
+func (f *Float32) Add(delta float32) float32 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float32 and returns the new value. +func (f *Float32) Sub(delta float32) float32 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float32 values. +// +// Deprecated: Use CompareAndSwap +func (f *Float32) CAS(old, new float32) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float32 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) { + return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float32) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 00000000..5bc11caa --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(val float64) *Float64 { + x := &Float64{} + if val != _zeroFloat64 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(val float64) { + x.v.Store(math.Float64bits(val)) +} + +// Swap atomically stores the given float64 and returns the old +// value. +func (x *Float64) Swap(val float64) (old float64) { + return math.Float64frombits(x.v.Swap(math.Float64bits(val))) +} + +// MarshalJSON encodes the wrapped float64 into JSON. 
+func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 00000000..48c52b0a --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. 
+func (f *Float64) Add(delta float64) float64 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(delta float64) float64 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float64 values. +// +// Deprecated: Use CompareAndSwap +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float64 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) { + return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 00000000..1e9ef4f8 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,27 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 00000000..5320eac1 --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(val int32) *Int32 { + return &Int32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(delta int32) int32 { + return atomic.AddInt32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(delta int32) int32 { + return atomic.AddInt32(&i.v, -delta) +} + +// Inc atomically increments the wrapped int32 and returns the new value. 
+func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Int32) CAS(old, new int32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(val int32) { + atomic.StoreInt32(&i.v, val) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(val int32) (old int32) { + return atomic.SwapInt32(&i.v, val) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 00000000..460821d0 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(val int64) *Int64 { + return &Int64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(delta int64) int64 { + return atomic.AddInt64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(delta int64) int64 { + return atomic.AddInt64(&i.v, -delta) +} + +// Inc atomically increments the wrapped int64 and returns the new value. 
+func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Int64) CAS(old, new int64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(val int64) { + atomic.StoreInt64(&i.v, val) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(val int64) (old int64) { + return atomic.SwapInt64(&i.v, val) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 00000000..54b74174 --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go new file mode 100644 index 00000000..1fb6c03b --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118.go @@ -0,0 +1,31 @@ +// Copyright (c) 2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 +// +build go1.18 + +package atomic + +import "fmt" + +// String returns a human readable representation of a Pointer's underlying value. +func (p *Pointer[T]) String() string { + return fmt.Sprint(p.Load()) +} diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go new file mode 100644 index 00000000..e0f47dba --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118_pre119.go @@ -0,0 +1,60 @@ +// Copyright (c) 2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go new file mode 100644 index 00000000..6726f17a --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go119.go @@ -0,0 +1,61 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.19 +// +build go1.19 + +package atomic + +import "sync/atomic" + +// Pointer is an atomic pointer of type *T. +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p atomic.Pointer[T] +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(v) + } + return &p +} + +// Load atomically loads the wrapped value. 
+func (p *Pointer[T]) Load() *T { + return p.p.Load() +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(val) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return p.p.Swap(val) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(old, new) +} diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 00000000..061466c5 --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,72 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. 
+type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(val string) *String { + x := &String{} + if val != _zeroString { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + return unpackString(x.v.Load()) +} + +// Store atomically stores the passed string. +func (x *String) Store(val string) { + x.v.Store(packString(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for string values. +func (x *String) CompareAndSwap(old, new string) (swapped bool) { + if x.v.CompareAndSwap(packString(old), packString(new)) { + return true + } + + if old == _zeroString { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packString(new)) + } + + return false +} + +// Swap atomically stores the given string and returns the old +// value. +func (x *String) Swap(val string) (old string) { + return unpackString(x.v.Swap(packString(val))) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 00000000..019109c8 --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go + +func packString(s string) interface{} { + return s +} + +func unpackString(v interface{}) string { + if s, ok := v.(string); ok { + return s + } + return "" +} + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go new file mode 100644 index 00000000..cc2a230c --- /dev/null +++ b/vendor/go.uber.org/atomic/time.go @@ -0,0 +1,55 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "time" +) + +// Time is an atomic type-safe wrapper for time.Time values. +type Time struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroTime time.Time + +// NewTime creates a new Time. +func NewTime(val time.Time) *Time { + x := &Time{} + if val != _zeroTime { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Time. +func (x *Time) Load() time.Time { + return unpackTime(x.v.Load()) +} + +// Store atomically stores the passed time.Time. +func (x *Time) Store(val time.Time) { + x.v.Store(packTime(val)) +} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go new file mode 100644 index 00000000..1e3dc978 --- /dev/null +++ b/vendor/go.uber.org/atomic/time_ext.go @@ -0,0 +1,36 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go + +func packTime(t time.Time) interface{} { + return t +} + +func unpackTime(v interface{}) time.Time { + if t, ok := v.(time.Time); ok { + return t + } + return time.Time{} +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 00000000..4adc294a --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(val uint32) *Uint32 { + return &Uint32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(delta uint32) uint32 { + return atomic.AddUint32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(delta uint32) uint32 { + return atomic.AddUint32(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. 
+func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(val uint32) { + atomic.StoreUint32(&i.v, val) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(val uint32) (old uint32) { + return atomic.SwapUint32(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 00000000..0e2eddb3 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(val uint64) *Uint64 { + return &Uint64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(delta uint64) uint64 { + return atomic.AddUint64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(delta uint64) uint64 { + return atomic.AddUint64(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. 
+func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(val uint64) { + atomic.StoreUint64(&i.v, val) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(val uint64) (old uint64) { + return atomic.SwapUint64(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go new file mode 100644 index 00000000..7d5b000d --- /dev/null +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(val uintptr) *Uintptr { + return &Uintptr{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. 
+func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(val uintptr) { + atomic.StoreUintptr(&i.v, val) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(val uintptr) (old uintptr) { + return atomic.SwapUintptr(&i.v, val) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. +func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go new file mode 100644 index 00000000..34868baf --- /dev/null +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -0,0 +1,65 @@ +// Copyright (c) 2021-2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: val} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(val unsafe.Pointer) { + atomic.StorePointer(&p.v, val) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { + return atomic.SwapPointer(&p.v, val) +} + +// CAS is an atomic compare-and-swap. 
+// +// Deprecated: Use CompareAndSwap +func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return p.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 00000000..52caedb9 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + _ nocmp // disallow non-atomic comparison + + atomic.Value +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/crypto/LICENSE +++ b/vendor/golang.org/x/crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go index aaafea2b..b53ea288 100644 --- a/vendor/golang.org/x/crypto/acme/acme.go +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -31,12 +31,11 @@ import ( "crypto/x509/pkix" "encoding/asn1" "encoding/base64" - "encoding/hex" "encoding/json" - "encoding/pem" "errors" "fmt" "math/big" + "net" "net/http" "strings" "sync" @@ -353,6 +352,10 @@ func (c *Client) authorize(ctx context.Context, typ, val string) (*Authorization if _, err := c.Discover(ctx); err != nil { return nil, err } + if c.dir.AuthzURL == "" { + // Pre-Authorization is unsupported + return nil, errPreAuthorizationNotSupported + } type authzID struct { Type string `json:"type"` @@ -467,7 +470,7 @@ func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorizat // while waiting for a final authorization status. d := retryAfter(res.Header.Get("Retry-After")) if d == 0 { - // Given that the fastest challenges TLS-SNI and HTTP-01 + // Given that the fastest challenges TLS-ALPN and HTTP-01 // require a CA to make at least 1 network round trip // and most likely persist a challenge state, // this default delay seems reasonable. @@ -514,7 +517,11 @@ func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error return nil, err } - res, err := c.post(ctx, nil, chal.URI, json.RawMessage("{}"), wantStatus( + payload := json.RawMessage("{}") + if len(chal.Payload) != 0 { + payload = chal.Payload + } + res, err := c.post(ctx, nil, chal.URI, payload, wantStatus( http.StatusOK, // according to the spec http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) )) @@ -564,50 +571,28 @@ func (c *Client) HTTP01ChallengePath(token string) string { } // TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// Always returns an error. // -// Deprecated: This challenge type is unused in both draft-02 and RFC versions of the ACME spec. 
-func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, "", err - } - b := sha256.Sum256([]byte(ka)) - h := hex.EncodeToString(b[:]) - name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) - cert, err = tlsChallengeCert([]string{name}, opt) - if err != nil { - return tls.Certificate{}, "", err - } - return cert, name, nil +// Deprecated: This challenge type was only present in pre-standardized ACME +// protocol drafts and is insecure for use in shared hosting environments. +func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (tls.Certificate, string, error) { + return tls.Certificate{}, "", errPreRFC } // TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// Always returns an error. // -// Deprecated: This challenge type is unused in both draft-02 and RFC versions of the ACME spec. -func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { - b := sha256.Sum256([]byte(token)) - h := hex.EncodeToString(b[:]) - sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) - - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, "", err - } - b = sha256.Sum256([]byte(ka)) - h = hex.EncodeToString(b[:]) - sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) - - cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) - if err != nil { - return tls.Certificate{}, "", err - } - return cert, sanA, nil +// Deprecated: This challenge type was only present in pre-standardized ACME +// protocol drafts and is insecure for use in shared hosting environments. 
+func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (tls.Certificate, string, error) { + return tls.Certificate{}, "", errPreRFC } // TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. // Servers can present the certificate to validate the challenge and prove control -// over a domain name. For more details on TLS-ALPN-01 see -// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 +// over an identifier (either a DNS name or the textual form of an IPv4 or IPv6 +// address). For more details on TLS-ALPN-01 see +// https://www.rfc-editor.org/rfc/rfc8737 and https://www.rfc-editor.org/rfc/rfc8738 // // The token argument is a Challenge.Token value. // If a WithKey option is provided, its private part signs the returned cert, @@ -615,9 +600,13 @@ func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tl // If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. // // The returned certificate is valid for the next 24 hours and must be presented only when -// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol +// the server name in the TLS ClientHello matches the identifier, and the special acme-tls/1 ALPN protocol // has been specified. -func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { +// +// Validation requests for IP address identifiers will use the reverse DNS form in the server name +// in the TLS ClientHello since the SNI extension is not supported for IP addresses. +// See RFC 8738 Section 6 for more information. 
+func (c *Client) TLSALPN01ChallengeCert(token, identifier string, opt ...CertOption) (cert tls.Certificate, err error) { ka, err := keyAuth(c.Key.Public(), token) if err != nil { return tls.Certificate{}, err @@ -647,7 +636,7 @@ func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) } tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) newOpt = append(newOpt, WithTemplate(tmpl)) - return tlsChallengeCert([]string{domain}, newOpt) + return tlsChallengeCert(identifier, newOpt) } // popNonce returns a nonce value previously stored with c.addNonce @@ -701,7 +690,7 @@ func (c *Client) addNonce(h http.Header) { } func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { - r, err := http.NewRequest("HEAD", url, nil) + r, err := http.NewRequestWithContext(ctx, "HEAD", url, nil) if err != nil { return "", err } @@ -765,11 +754,15 @@ func defaultTLSChallengeCertTemplate() *x509.Certificate { } } -// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges -// with the given SANs and auto-generated public/private key pair. -// The Subject Common Name is set to the first SAN to aid debugging. +// tlsChallengeCert creates a temporary certificate for TLS-ALPN challenges +// for the given identifier, using an auto-generated public/private key pair. +// +// If the provided identifier is a domain name, it will be used as a DNS type SAN and for the +// subject common name. If the provided identifier is an IP address it will be used as an IP type +// SAN. +// // To create a cert with a custom key pair, specify WithKey option. 
-func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { +func tlsChallengeCert(identifier string, opt []CertOption) (tls.Certificate, error) { var key crypto.Signer tmpl := defaultTLSChallengeCertTemplate() for _, o := range opt { @@ -793,9 +786,12 @@ func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { return tls.Certificate{}, err } } - tmpl.DNSNames = san - if len(san) > 0 { - tmpl.Subject.CommonName = san[0] + + if ip := net.ParseIP(identifier); ip != nil { + tmpl.IPAddresses = []net.IP{ip} + } else { + tmpl.DNSNames = []string{identifier} + tmpl.Subject.CommonName = identifier } der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) @@ -808,11 +804,5 @@ func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { }, nil } -// encodePEM returns b encoded as PEM with block of type typ. -func encodePEM(typ string, b []byte) []byte { - pb := &pem.Block{Type: typ, Bytes: b} - return pem.EncodeToMemory(pb) -} - // timeNow is time.Now, except in tests which can mess with it. var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go index 6b4cdf40..cde9066f 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -134,7 +134,8 @@ type Manager struct { // RenewBefore optionally specifies how early certificates should // be renewed before they expire. // - // If zero, they're renewed 30 days before expiration. + // If zero, they're renewed at the lesser of 30 days or + // 1/3 of the certificate lifetime. 
RenewBefore time.Duration // Client is used to perform low-level operations, such as account registration @@ -292,6 +293,10 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, } // regular domain + if err := m.hostPolicy()(ctx, name); err != nil { + return nil, err + } + ck := certKey{ domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114 isRSA: !supportsECDSA(hello), @@ -305,9 +310,6 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, } // first-time - if err := m.hostPolicy()(ctx, name); err != nil { - return nil, err - } cert, err = m.createCert(ctx, ck) if err != nil { return nil, err @@ -463,7 +465,7 @@ func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error leaf: cert.Leaf, } m.state[ck] = s - m.startRenew(ck, s.key, s.leaf.NotAfter) + m.startRenew(ck, s.key, s.leaf.NotBefore, s.leaf.NotAfter) return cert, nil } @@ -609,7 +611,7 @@ func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, } state.cert = der state.leaf = leaf - m.startRenew(ck, state.key, state.leaf.NotAfter) + m.startRenew(ck, state.key, state.leaf.NotBefore, state.leaf.NotAfter) return state.tlscert() } @@ -907,7 +909,7 @@ func httpTokenCacheKey(tokenPath string) string { // // The key argument is a certificate private key. // The exp argument is the cert expiration time (NotAfter). -func (m *Manager) startRenew(ck certKey, key crypto.Signer, exp time.Time) { +func (m *Manager) startRenew(ck certKey, key crypto.Signer, notBefore, notAfter time.Time) { m.renewalMu.Lock() defer m.renewalMu.Unlock() if m.renewal[ck] != nil { @@ -919,7 +921,7 @@ func (m *Manager) startRenew(ck certKey, key crypto.Signer, exp time.Time) { } dr := &domainRenewal{m: m, ck: ck, key: key} m.renewal[ck] = dr - dr.start(exp) + dr.start(notBefore, notAfter) } // stopRenew stops all currently running cert renewal timers. 
@@ -1027,13 +1029,6 @@ func (m *Manager) hostPolicy() HostPolicy { return defaultHostPolicy } -func (m *Manager) renewBefore() time.Duration { - if m.RenewBefore > renewJitter { - return m.RenewBefore - } - return 720 * time.Hour // 30 days -} - func (m *Manager) now() time.Time { if m.nowFunc != nil { return m.nowFunc() diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go index 9d62f8ce..460133e0 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/listener.go +++ b/vendor/golang.org/x/crypto/acme/autocert/listener.go @@ -10,7 +10,6 @@ import ( "net" "os" "path/filepath" - "runtime" "time" ) @@ -124,32 +123,13 @@ func (ln *listener) Close() error { return ln.tcpListener.Close() } -func homeDir() string { - if runtime.GOOS == "windows" { - return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - } - if h := os.Getenv("HOME"); h != "" { - return h - } - return "/" -} - func cacheDir() string { const base = "golang-autocert" - switch runtime.GOOS { - case "darwin": - return filepath.Join(homeDir(), "Library", "Caches", base) - case "windows": - for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { - if v := os.Getenv(ev); v != "" { - return filepath.Join(v, base) - } - } - // Worst case: - return filepath.Join(homeDir(), base) + cache, err := os.UserCacheDir() + if err != nil { + // Fall back to the root directory. + cache = "/.cache" } - if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { - return filepath.Join(xdg, base) - } - return filepath.Join(homeDir(), ".cache", base) + + return filepath.Join(cache, base) } diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go index 0df7da78..93984f38 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/renewal.go +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -11,9 +11,6 @@ import ( "time" ) -// renewJitter is the maximum deviation from Manager.RenewBefore. 
-const renewJitter = time.Hour - // domainRenewal tracks the state used by the periodic timers // renewing a single domain's cert. type domainRenewal struct { @@ -30,13 +27,13 @@ type domainRenewal struct { // defined by the certificate expiration time exp. // // If the timer is already started, calling start is a noop. -func (dr *domainRenewal) start(exp time.Time) { +func (dr *domainRenewal) start(notBefore, notAfter time.Time) { dr.timerMu.Lock() defer dr.timerMu.Unlock() if dr.timer != nil { return } - dr.timer = time.AfterFunc(dr.next(exp), dr.renew) + dr.timer = time.AfterFunc(dr.next(notBefore, notAfter), dr.renew) } // stop stops the cert renewal timer and waits for any in-flight calls to renew @@ -79,7 +76,7 @@ func (dr *domainRenewal) renew() { // TODO: rotate dr.key at some point? next, err := dr.do(ctx) if err != nil { - next = renewJitter / 2 + next = time.Hour / 2 next += time.Duration(pseudoRand.int63n(int64(next))) } testDidRenewLoop(next, err) @@ -107,8 +104,8 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { // a race is likely unavoidable in a distributed environment // but we try nonetheless if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { - next := dr.next(tlscert.Leaf.NotAfter) - if next > dr.m.renewBefore()+renewJitter { + next := dr.next(tlscert.Leaf.NotBefore, tlscert.Leaf.NotAfter) + if next > 0 { signer, ok := tlscert.PrivateKey.(crypto.Signer) if ok { state := &certState{ @@ -139,18 +136,23 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { return 0, err } dr.updateState(state) - return dr.next(leaf.NotAfter), nil + return dr.next(leaf.NotBefore, leaf.NotAfter), nil } -func (dr *domainRenewal) next(expiry time.Time) time.Duration { - d := expiry.Sub(dr.m.now()) - dr.m.renewBefore() - // add a bit of randomness to renew deadline - n := pseudoRand.int63n(int64(renewJitter)) - d -= time.Duration(n) - if d < 0 { - return 0 +// next returns the wait time before the next renewal 
should start. +// If manager.RenewBefore is set, it uses that capped at 30 days, +// otherwise it uses a default of 1/3 of the cert lifetime. +// It builds in a jitter of 10% of the renew threshold, capped at 1 hour. +func (dr *domainRenewal) next(notBefore, notAfter time.Time) time.Duration { + threshold := min(notAfter.Sub(notBefore)/3, 30*24*time.Hour) + if dr.m.RenewBefore > 0 { + threshold = min(dr.m.RenewBefore, 30*24*time.Hour) } - return d + maxJitter := min(threshold/10, time.Hour) + jitter := pseudoRand.int63n(int64(maxJitter)) + renewAt := notAfter.Add(-(threshold - time.Duration(jitter))) + renewWait := renewAt.Sub(dr.m.now()) + return max(0, renewWait) } var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go b/vendor/golang.org/x/crypto/acme/http.go index 58836e5d..7d1052ac 100644 --- a/vendor/golang.org/x/crypto/acme/http.go +++ b/vendor/golang.org/x/crypto/acme/http.go @@ -15,6 +15,7 @@ import ( "io" "math/big" "net/http" + "runtime/debug" "strconv" "strings" "time" @@ -65,7 +66,7 @@ func (c *Client) retryTimer() *retryTimer { // The n argument is always bounded between 1 and 30. // The returned value is always greater than 0. 
 func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
-	const max = 10 * time.Second
+	const maxVal = 10 * time.Second
 	var jitter time.Duration
 	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
 		// Set the minimum to 1ms to avoid a case where
@@ -85,10 +86,7 @@ func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
 		n = 30
 	}
 	d := time.Duration(1<<uint(n-1))*time.Second + jitter
-	if d > max {
-		return max
-	}
-	return d
+	return min(d, maxVal)
 }
 
 // retryAfter parses a Retry-After HTTP header value,
@@ -130,7 +128,7 @@ func wantStatus(codes ...int) resOkay {
 func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
 	retry := c.retryTimer()
 	for {
-		req, err := http.NewRequest("GET", url, nil)
+		req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -230,7 +228,7 @@ func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string,
 	if err != nil {
 		return nil, nil, err
 	}
-	req, err := http.NewRequest("POST", url, bytes.NewReader(b))
+	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(b))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -271,9 +269,27 @@ func (c *Client) httpClient() *http.Client {
 }
 
 // packageVersion is the version of the module that contains this package, for
-// sending as part of the User-Agent header. It's set in version_go112.go.
+// sending as part of the User-Agent header.
 var packageVersion string
 
+func init() {
+	// Set packageVersion if the binary was built in modules mode and x/crypto
+	// was not replaced with a different module.
+	info, ok := debug.ReadBuildInfo()
+	if !ok {
+		return
+	}
+	for _, m := range info.Deps {
+		if m.Path != "golang.org/x/crypto" {
+			continue
+		}
+		if m.Replace == nil {
+			packageVersion = m.Version
+		}
+		break
+	}
+}
+
 // userAgent returns the User-Agent header value. It includes the package name,
 // the module version (if available), and the c.UserAgent value (if set).
func (c *Client) userAgent() string { diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go index b38828d8..68502756 100644 --- a/vendor/golang.org/x/crypto/acme/jws.go +++ b/vendor/golang.org/x/crypto/acme/jws.go @@ -92,7 +92,7 @@ func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid KeyID, nonce, ur if err != nil { return nil, err } - phead := base64.RawURLEncoding.EncodeToString([]byte(phJSON)) + phead := base64.RawURLEncoding.EncodeToString(phJSON) var payload string if val, ok := claimset.(string); ok { payload = val diff --git a/vendor/golang.org/x/crypto/acme/rfc8555.go b/vendor/golang.org/x/crypto/acme/rfc8555.go index 3152e531..976b2770 100644 --- a/vendor/golang.org/x/crypto/acme/rfc8555.go +++ b/vendor/golang.org/x/crypto/acme/rfc8555.go @@ -232,7 +232,7 @@ func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderO return responseOrder(res) } -// GetOrder retrives an order identified by the given URL. +// GetOrder retrieves an order identified by the given URL. // For orders created with AuthorizeOrder, the url value is Order.URI. // // If a caller needs to poll an order until its status is final, @@ -272,7 +272,7 @@ func (c *Client) WaitOrder(ctx context.Context, url string) (*Order, error) { case err != nil: // Skip and retry. case o.Status == StatusInvalid: - return nil, &OrderError{OrderURL: o.URI, Status: o.Status} + return nil, &OrderError{OrderURL: o.URI, Status: o.Status, Problem: o.Error} case o.Status == StatusReady || o.Status == StatusValid: return o, nil } @@ -369,7 +369,7 @@ func (c *Client) CreateOrderCert(ctx context.Context, url string, csr []byte, bu } // The only acceptable status post finalize and WaitOrder is "valid". 
if o.Status != StatusValid { - return nil, "", &OrderError{OrderURL: o.URI, Status: o.Status} + return nil, "", &OrderError{OrderURL: o.URI, Status: o.Status, Problem: o.Error} } crt, err := c.fetchCertRFC(ctx, o.CertURL, bundle) return crt, o.CertURL, err diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go index 4888726f..322640c4 100644 --- a/vendor/golang.org/x/crypto/acme/types.go +++ b/vendor/golang.org/x/crypto/acme/types.go @@ -7,6 +7,7 @@ package acme import ( "crypto" "crypto/x509" + "encoding/json" "errors" "fmt" "net/http" @@ -55,6 +56,10 @@ var ( // ErrNoAccount indicates that the Client's key has not been registered with the CA. ErrNoAccount = errors.New("acme: account does not exist") + + // errPreAuthorizationNotSupported indicates that the server does not + // support pre-authorization of identifiers. + errPreAuthorizationNotSupported = errors.New("acme: pre-authorization is not supported") ) // A Subproblem describes an ACME subproblem as reported in an Error. @@ -149,13 +154,16 @@ func (a *AuthorizationError) Error() string { // OrderError is returned from Client's order related methods. // It indicates the order is unusable and the clients should start over with -// AuthorizeOrder. +// AuthorizeOrder. A Problem description may be provided with details on +// what caused the order to become unusable. // // The clients can still fetch the order object from CA using GetOrder // to inspect its state. type OrderError struct { OrderURL string Status string + // Problem is the error that occurred while processing the order. + Problem *Error } func (oe *OrderError) Error() string { @@ -288,7 +296,7 @@ type Directory struct { // KeyChangeURL allows to perform account key rollover flow. KeyChangeURL string - // Term is a URI identifying the current terms of service. + // Terms is a URI identifying the current terms of service. 
Terms string // Website is an HTTP or HTTPS URL locating a website @@ -527,6 +535,16 @@ type Challenge struct { // when this challenge was used. // The type of a non-nil value is *Error. Error error + + // Payload is the JSON-formatted payload that the client sends + // to the server to indicate it is ready to respond to the challenge. + // When unset, it defaults to an empty JSON object: {}. + // For most challenges, the client must not set Payload, + // see https://tools.ietf.org/html/rfc8555#section-7.5.1. + // Payload is used only for newer challenges (such as "device-attest-01") + // where the client must send additional data for the server to validate + // the challenge. + Payload json.RawMessage } // wireChallenge is ACME JSON challenge representation. @@ -604,7 +622,7 @@ func (*certOptKey) privateCertOpt() {} // // In TLS ChallengeCert methods, the template is also used as parent, // resulting in a self-signed certificate. -// The DNSNames field of t is always overwritten for tls-sni challenge certs. +// The DNSNames or IPAddresses fields of t are always overwritten for tls-alpn challenge certs. func WithTemplate(t *x509.Certificate) CertOption { return (*certOptTemplate)(t) } diff --git a/vendor/golang.org/x/crypto/acme/version_go112.go b/vendor/golang.org/x/crypto/acme/version_go112.go deleted file mode 100644 index cc5fab60..00000000 --- a/vendor/golang.org/x/crypto/acme/version_go112.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 - -package acme - -import "runtime/debug" - -func init() { - // Set packageVersion if the binary was built in modules mode and x/crypto - // was not replaced with a different module. 
- info, ok := debug.ReadBuildInfo() - if !ok { - return - } - for _, m := range info.Deps { - if m.Path != "golang.org/x/crypto" { - continue - } - if m.Replace == nil { - packageVersion = m.Version - } - break - } -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index 5577c0f9..3e7f8df8 100644 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -4,7 +4,7 @@ // Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing // algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" +package bcrypt // The code is a port of Provos and Mazières's C implementation. import ( @@ -50,7 +50,7 @@ func (ih InvalidHashPrefixError) Error() string { type InvalidCostError int func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed inclusive range %d..%d", int(ic), MinCost, MaxCost) } const ( diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go index 213bf204..08989568 100644 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" +package blowfish // The code is a port of Bruce Schneier's C implementation. // See https://www.schneier.com/blowfish.html. diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 00000000..2a7cf70d --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go new file mode 100644 index 00000000..150f887e --- /dev/null +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be +// compiled the first time it is needed. 
+type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go new file mode 100644 index 00000000..9d3955bd --- /dev/null +++ b/vendor/golang.org/x/mod/module/module.go @@ -0,0 +1,840 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type along with support code. 
+// +// The [module.Version] type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably [Check], verify that +// a particular path, version pair is valid. +// +// # Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. +// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. 
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// # Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. 
+package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "cmp" + "errors" + "fmt" + "path" + "slices" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/mod/semver" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". + Path string + + // Version is usually a semantic version in canonical form. + // There are three exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. + Version string `json:",omitempty"` +} + +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a [ModuleError] derived from a [Version] and error, +// or err itself if it is already such an error. 
+func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. +func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + +// An InvalidPathError indicates a module, import, or file path doesn't +// satisfy all naming constraints. See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. +type InvalidPathError struct { + Kind string // "module", "import", or "file" + Path string + Err error +} + +func (e *InvalidPathError) Error() string { + return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) +} + +func (e *InvalidPathError) Unwrap() error { return e.Err } + +// Check checks that a given module path, version pair is valid. 
+// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } + } + _, pathMajor, _ := SplitPathVersion(path) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// modPathOK reports whether r can appear in a module path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// +// This matches what "go get" has historically recognized in import paths, +// and avoids confusing sequences like '%20' or '+' that would change meaning +// if used in a URL. +// +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see "escaped paths" above). +func modPathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// importPathOK reports whether r can appear in a package import path element. 
+// +// Import paths are intermediate between module paths and file paths: we +// disallow characters that would be confusing or ambiguous as arguments to +// 'go get' (such as '@' and ' ' ), but allow certain characters that are +// otherwise-unambiguous on the command line and historically used for some +// binary names (such as '++' as a suffix for compiler binaries and wrappers). +func importPathOK(r rune) bool { + return modPathOK(r) || r == '+' +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "escaped paths" above. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + return strings.ContainsRune(allowed, r) + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by [CheckImportPath], +// with three additional constraints. 
+// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. +// Third, no path element may begin with a dot. +func CheckPath(path string) (err error) { + defer func() { + if err != nil { + err = &InvalidPathError{Kind: "module", Path: path, Err: err} + } + }() + + if err := checkPath(path, modulePath); err != nil { + return err + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("leading slash") + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("missing dot in first path element") + } + if path[0] == '-' { + return fmt.Errorf("leading dash in first path element") + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("invalid char %q in first path element", r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("invalid version") + } + return nil +} + +// CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// It must not end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). 
The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckImportPath(path string) error { + if err := checkPath(path, importPath); err != nil { + return &InvalidPathError{Kind: "import", Path: path, Err: err} + } + return nil +} + +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. +type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + +// checkPath checks that a general path is valid. kind indicates what +// specific constraints should be applied. +// +// checkPath returns an error describing why the path is not valid. +// Because these checks apply to module, import, and file paths, +// and because other checks may be applied, the caller is expected to wrap +// this error with [InvalidPathError]. +func checkPath(path string, kind pathKind) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if path[0] == '-' && kind != filePath { + return fmt.Errorf("leading dash") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], kind); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], kind); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. 
+func checkElem(elem string, kind pathKind) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && kind == modulePath { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + for _, r := range elem { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("%q disallowed as path element component on Windows", short) + } + } + + if kind == filePath { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// CheckFilePath checks that a slash-separated file path is valid. 
+// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckFilePath(path string) error { + if err := checkPath(path, filePath); err != nil { + return &InvalidPathError{Kind: "file", Path: path, Err: err} + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' 
{ + dot = true + } + i-- + } + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. +func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. 
+ // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return nil + } + m := semver.Major(v) + if pathMajor == "" { + if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { + return nil + } + pathMajor = "v0 or v1" + } else if pathMajor[0] == '/' || pathMajor[0] == '.' { + if m == pathMajor[1:] { + return nil + } + pathMajor = pathMajor[1:] + } + return &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), + } +} + +// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. +// An empty PathMajorPrefix allows either v0 or v1. +// +// Note that [MatchPathMajor] may accept some versions that do not actually begin +// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' +// pathMajor, even though that pathMajor implies 'v1' tagging. +func PathMajorPrefix(pathMajor string) string { + if pathMajor == "" { + return "" + } + if pathMajor[0] != '/' && pathMajor[0] != '.' { + panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") + } + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + m := pathMajor[1:] + if m != semver.Major(m) { + panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") + } + return m +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. 
+// The Version fields are interpreted as semantic versions (using [semver.Compare]) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". +func Sort(list []Version) { + slices.SortFunc(list, func(i, j Version) int { + if i.Path != j.Path { + return strings.Compare(i.Path, j.Path) + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := i.Version + vj := j.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) + } + return cmp.Compare(fi, fj) + }) +} + +// EscapePath returns the escaped form of the given module path. +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return escapeString(path) +} + +// EscapeVersion returns the escaped form of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EscapeVersion(v string) (escaped string, err error) { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } + } + return escapeString(v) +} + +func escapeString(s string) (escaped string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the escaping loop below depends on it. 
+ return "", fmt.Errorf("internal error: inconsistency in EscapePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. +func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped module path %q", escaped) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) + } + return path, nil +} + +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped version %q", escaped) + } + if err := checkElem(v, filePath); err != nil { + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) + } + return v, nil +} + +func unescapeString(escaped string) (string, bool) { + var buf []byte + + bang := false + for _, r := range escaped { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' 
{ + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} + +// MatchPrefixPatterns reports whether any path prefix of target matches one of +// the glob patterns (as defined by [path.Match]) in the comma-separated globs +// list. This implements the algorithm used when matching a module path to the +// GOPRIVATE environment variable, as described by 'go help module-private'. +// +// It ignores any empty or malformed patterns in the list. +// Trailing slashes on patterns are ignored. +func MatchPrefixPatterns(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. + var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + glob = strings.TrimSuffix(glob, "/") + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go new file mode 100644 index 00000000..9cf19d32 --- /dev/null +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Pseudo-versions +// +// Code authors are expected to tag the revisions they want users to use, +// including prereleases. However, not all authors tag versions at all, +// and not all commits a user might want to try will have tags. +// A pseudo-version is a version with a special form that allows us to +// address an untagged commit and order that version with respect to +// other versions we might encounter. +// +// A pseudo-version takes one of the general forms: +// +// (1) vX.0.0-yyyymmddhhmmss-abcdef123456 +// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 +// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible +// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 +// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible +// +// If there is no recently tagged version with the right major version vX, +// then form (1) is used, creating a space of pseudo-versions at the bottom +// of the vX version range, less than any tagged version, including the unlikely v0.0.0. +// +// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible, +// then the pseudo-version uses form (2) or (3), making it a prerelease for the next +// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string +// ensures that the pseudo-version compares less than possible future explicit prereleases +// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1. +// +// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible, +// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease. 
+ +package module + +import ( + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/semver" +) + +var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`) + +const PseudoVersionTimestampFormat = "20060102150405" + +// PseudoVersion returns a pseudo-version for the given major version ("v1") +// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time, +// and revision identifier (usually a 12-byte commit hash prefix). +func PseudoVersion(major, older string, t time.Time, rev string) string { + if major == "" { + major = "v0" + } + segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev) + build := semver.Build(older) + older = semver.Canonical(older) + if older == "" { + return major + ".0.0-" + segment // form (1) + } + if semver.Prerelease(older) != "" { + return older + ".0." + segment + build // form (4), (5) + } + + // Form (2), (3). + // Extract patch from vMAJOR.MINOR.PATCH + i := strings.LastIndex(older, ".") + 1 + v, patch := older[:i], older[i:] + + // Reassemble. + return v + incDecimal(patch) + "-0." + segment + build +} + +// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and +// revision, which may be used as a placeholder. +func ZeroPseudoVersion(major string) string { + return PseudoVersion(major, "", time.Time{}, "000000000000") +} + +// incDecimal returns the decimal string incremented by 1. +func incDecimal(decimal string) string { + // Scan right to left turning 9s to 0s until you find a digit to increment. 
+ digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '9'; i-- { + digits[i] = '0' + } + if i >= 0 { + digits[i]++ + } else { + // digits is all zeros + digits[0] = '1' + digits = append(digits, '0') + } + return string(digits) +} + +// decDecimal returns the decimal string decremented by 1, or the empty string +// if the decimal is all zeroes. +func decDecimal(decimal string) string { + // Scan right to left turning 0s to 9s until you find a digit to decrement. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '0'; i-- { + digits[i] = '9' + } + if i < 0 { + // decimal is all zeros + return "" + } + if i == 0 && digits[i] == '1' && len(digits) > 1 { + digits = digits[1:] + } else { + digits[i]-- + } + return string(digits) +} + +// IsPseudoVersion reports whether v is a pseudo-version. +func IsPseudoVersion(v string) bool { + return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) +} + +// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, +// timestamp, and revision, as returned by [ZeroPseudoVersion]. +func IsZeroPseudoVersion(v string) bool { + return v == ZeroPseudoVersion(semver.Major(v)) +} + +// PseudoVersionTime returns the time stamp of the pseudo-version v. +// It returns an error if v is not a pseudo-version or if the time stamp +// embedded in the pseudo-version is not a valid time. +func PseudoVersionTime(v string) (time.Time, error) { + _, timestamp, _, _, err := parsePseudoVersion(v) + if err != nil { + return time.Time{}, err + } + t, err := time.Parse("20060102150405", timestamp) + if err != nil { + return time.Time{}, &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("malformed time %q", timestamp), + } + } + return t, nil +} + +// PseudoVersionRev returns the revision identifier of the pseudo-version v. +// It returns an error if v is not a pseudo-version. 
+func PseudoVersionRev(v string) (rev string, err error) { + _, _, rev, _, err = parsePseudoVersion(v) + return +} + +// PseudoVersionBase returns the canonical parent version, if any, upon which +// the pseudo-version v is based. +// +// If v has no parent version (that is, if it is "vX.0.0-[…]"), +// PseudoVersionBase returns the empty string and a nil error. +func PseudoVersionBase(v string) (string, error) { + base, _, _, build, err := parsePseudoVersion(v) + if err != nil { + return "", err + } + + switch pre := semver.Prerelease(base); pre { + case "": + // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" + if build != "" { + // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible + // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, + // but the "+incompatible" suffix implies that the major version of + // the parent tag is not compatible with the module's import path. + // + // There are a few such entries in the index generated by proxy.golang.org, + // but we believe those entries were generated by the proxy itself. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("lacks base version, but has build metadata %q", build), + } + } + return "", nil + + case "-0": + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible + base = strings.TrimSuffix(base, pre) + i := strings.LastIndexByte(base, '.') + if i < 0 { + panic("base from parsePseudoVersion missing patch number: " + base) + } + patch := decDecimal(base[i+1:]) + if patch == "" { + // vX.0.0-0 is invalid, but has been observed in the wild in the index + // generated by requests to proxy.golang.org. + // + // NOTE(bcmills): I cannot find a historical bug that accounts for + // pseudo-versions of this form, nor have I seen such versions in any + // actual go.mod files. 
If we find actual examples of this form and a + // reasonable theory of how they came into existence, it seems fine to + // treat them as equivalent to vX.0.0 (especially since the invalid + // pseudo-versions have lower precedence than the real ones). For now, we + // reject them. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("version before %s would have negative patch number", base), + } + } + return base[:i+1] + patch + build, nil + + default: + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible + if !strings.HasSuffix(base, ".0") { + panic(`base from parsePseudoVersion missing ".0" before date: ` + base) + } + return strings.TrimSuffix(base, ".0") + build, nil + } +} + +var errPseudoSyntax = errors.New("syntax error") + +func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { + if !IsPseudoVersion(v) { + return "", "", "", "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: errPseudoSyntax, + } + } + build = semver.Build(v) + v = strings.TrimSuffix(v, build) + j := strings.LastIndex(v, "-") + v, rev = v[:j], v[j+1:] + i := strings.LastIndex(v, "-") + if j := strings.LastIndex(v, "."); j > i { + base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" + timestamp = v[j+1:] + } else { + base = v[:i] // "vX.0.0" + timestamp = v[i+1:] + } + return base, timestamp, rev, build, nil +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 00000000..824b282c --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,407 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. 
+// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import ( + "slices" + "strings" +) + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formatting +// is an identical string. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. 
+func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. 
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements [sort.Interface] for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 } + +// Sort sorts a list of semantic version strings using [Compare] and falls back +// to use [strings.Compare] if both versions are considered equal. +func Sort(list []string) { + slices.SortFunc(list, compareVersion) +} + +func compareVersion(a, b string) int { + cmp := Compare(a, b) + if cmp != 0 { + return cmp + } + return strings.Compare(a, b) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6..e81b73e6 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 00000000..8a7a89d0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,169 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. 
+type http2Config struct { + MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPConfig(&conf, h1.HTTP2) + setConfigDefaults(&conf, true) + return conf +} + +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. 
+ if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPConfig(&conf, h2.t1.HTTP2) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. 
+ const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 00000000..b4373fe3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 00000000..6b071c14 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b27..9a4bd123 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). 
type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -275,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -342,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -483,30 +490,47 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. 
The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. +func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return fh, err + } + if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + } + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. 
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { if fr.lastFrame != nil { fr.lastFrame.invalidate() } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) @@ -516,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -528,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) { return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameBodyForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. 
This might help others debug @@ -540,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1141,7 +1180,16 @@ type PriorityFrame struct { PriorityParam } -// PriorityParam are the stream prioritzation parameters. +var defaultRFC9218Priority = PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + +// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no @@ -1156,6 +1204,20 @@ type PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). 
It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." + // + // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p PriorityParam) IsZero() bool { @@ -1490,7 +1552,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1560,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go index 9933c9f8..9921ca09 100644 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -15,21 +15,32 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" +// Setting DebugGoroutines to false during a test to disable goroutine debugging +// results in race detector complaints when a test leaves goroutines running before +// returning. Tests shouldn't do this, of course, but when they do it generally shows +// up as infrequent, hard-to-debug flakes. (See #66519.) +// +// Disable goroutine debugging during individual tests with an atomic bool. +// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition +// here is harmless.) 
+var disableDebugGoroutines atomic.Bool + type goroutineLock uint64 func newGoroutineLock() goroutineLock { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() != uint64(g) { @@ -38,7 +49,7 @@ func (g goroutineLock) check() { } func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() == uint64(g) { diff --git a/vendor/golang.org/x/net/http2/h2c/h2c.go b/vendor/golang.org/x/net/http2/h2c/h2c.go index 2d6bf861..19e94791 100644 --- a/vendor/golang.org/x/net/http2/h2c/h2c.go +++ b/vendor/golang.org/x/net/http2/h2c/h2c.go @@ -132,11 +132,8 @@ func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // of the body, and reforward the client preface on the net.Conn this function // creates. 
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) { - hijacker, ok := w.(http.Hijacker) - if !ok { - return nil, errors.New("h2c: connection does not support Hijack") - } - conn, rw, err := hijacker.Hijack() + rc := http.NewResponseController(w) + conn, rw, err := rc.Hijack() if err != nil { return nil, err } @@ -163,10 +160,6 @@ func h2cUpgrade(w http.ResponseWriter, r *http.Request) (_ net.Conn, settings [] if err != nil { return nil, nil, err } - hijacker, ok := w.(http.Hijacker) - if !ok { - return nil, nil, errors.New("h2c: connection does not support Hijack") - } body, err := io.ReadAll(r.Body) if err != nil { @@ -174,7 +167,8 @@ func h2cUpgrade(w http.ResponseWriter, r *http.Request) (_ net.Conn, settings [] } r.Body = io.NopCloser(bytes.NewBuffer(body)) - conn, rw, err := hijacker.Hijack() + rc := http.NewResponseController(w) + conn, rw, err := rc.Hijack() if err != nil { return nil, nil, err } diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df281..105fe12f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -11,21 +11,21 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. package http2 // import "golang.org/x/net/http2" import ( "bufio" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -34,7 +34,15 @@ var ( VerboseLogs bool logFrameWrites bool logFrameReads bool - inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. 
+ disableExtendedConnectProtocol = true ) func init() { @@ -47,6 +55,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false + } } const ( @@ -138,6 +149,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -147,21 +162,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -210,12 +227,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. 
-type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -241,13 +252,17 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -274,7 +289,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -292,6 +307,32 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. 
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + conn.SetWriteDeadline(time.Now().Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") @@ -362,23 +403,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). 
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index c5d08108..bdc5520e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -49,13 +50,18 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +133,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. 
+ WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -156,60 +178,13 @@ type Server struct { state *serverInternalState } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. - return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. 
+ errChanPool sync.Pool } func (s *serverInternalState) registerConn(sc *serverConn) { @@ -241,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *serverInternalState) getErrChan() chan error { + if s == nil { + return errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *serverInternalState) putErrChan(ch chan error) { + if s == nil { + errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -253,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + conf.state = &serverInternalState{ + activeConns: make(map[*serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -303,7 +302,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -320,12 +319,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + 
BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. + s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -400,16 +418,25 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -419,13 +446,19 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + 
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -451,15 +484,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -492,7 +525,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -529,7 +562,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -569,6 +602,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -588,6 +622,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -598,9 +633,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer *time.Timer // nil until used idleTimer *time.Timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -615,11 +655,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. 
- const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -775,8 +811,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -811,8 +846,8 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -881,7 +916,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -893,20 +928,24 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, + write: settings, }) 
sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -926,11 +965,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -944,6 +990,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -975,6 +1022,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -997,7 +1046,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. 
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1013,12 +1062,42 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := time.Now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1026,6 +1105,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1072,10 +1152,6 @@ func (sc *serverConn) readPreface() error { } } -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1083,7 +1159,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. 
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1115,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1278,6 +1354,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1552,6 +1632,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." 
return nil @@ -1639,7 +1724,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1661,6 +1746,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -1714,6 +1800,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2117,7 +2206,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2139,19 +2228,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: 
f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } - isConnect := rp.method == "CONNECT" + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.Protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) + } + + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2165,12 +2260,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") + } + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2179,7 +2278,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2195,83 +2294,38 @@ func (sc *serverConn) 
newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. 
- default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -2391,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. 
- errc = errChanPool.Get().(chan error) + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2403,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2510,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2811,6 +2865,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3079,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: sc.srv.state.getErrChan(), } select { @@ -3096,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } @@ -3160,12 +3219,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. 
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. @@ -3257,7 +3316,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd1..00000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. 
-// -// The testSyncHooks also manage timers and synthetic time in tests. -// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. 
- blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. 
-func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. 
-type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. -type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 2fa49490..ccb87e6d 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -25,8 +26,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" - "sort" "strconv" "strings" "sync" @@ -36,6 +35,7 
@@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -185,44 +185,38 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -258,8 +252,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -270,18 +264,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -301,7 +314,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -310,39 +323,77 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond 
*sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + extendedConnectAllowed bool + strictMaxConcurrentStreams bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. 
+ // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int + + // readBeforeStreamID is the smallest stream ID that has not been followed by + // a frame read from the peer. We use this to determine when a request may + // have been sent to a completely unresponsive connection: + // If the request ID is less than readBeforeStreamID, then we have had some + // indication of life on the connection since sending the request. + readBeforeStreamID uint32 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. reqHeaderMu chan struct{} + // internalStateHook reports state changes back to the net/http.ClientConn. + // Note that this is different from the user state hook registered by + // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn, + // which calls the user hook. + internalStateHook func() + // wmu is held while writing. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Only acquire both at the same time when changing peer settings. 
@@ -352,60 +403,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. -// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. 
One of these @@ -448,12 +445,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -487,7 +484,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +494,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +504,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -523,22 +520,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -569,6 +553,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -601,7 +587,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. 
+ case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -612,7 +605,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -626,23 +619,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -651,6 +630,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -669,9 +664,11 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -725,8 +722,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse, nil) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -787,49 +784,38 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) 
(*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + seenSettingsChan: make(chan struct{}), + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: time.Now(), + internalStateHook: internalStateHook, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -841,29 +827,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -871,11 +851,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: 
SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -885,23 +863,29 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
- ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1026,7 +1010,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -1051,21 +1035,67 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. 
+ if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { + st.canTakeNewRequest = true + } + + return +} + +func (cc *ClientConn) isUsableLocked() bool { + return cc.goAway == nil && + !cc.closed && + !cc.closing && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() - return +} + +// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn. +// +// This follows slightly different rules than clientConnIdleState.canTakeNewRequest. +// We only permit reservations up to the conn's concurrency limit. +// This differs from ClientConn.ReserveNewRequest, which permits reservations +// past the limit when StrictMaxConcurrentStreams is set. +func (cc *ClientConn) canReserveLocked() bool { + if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) { + return false + } + if !cc.isUsableLocked() { + return false + } + return true +} + +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets } func (cc *ClientConn) canTakeNewRequestLocked() bool { @@ -1073,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { return st.canTakeNewRequest } +// availableLocked reports the number of concurrency slots available. +func (cc *ClientConn) availableLocked() int { + if !cc.canTakeNewRequestLocked() { + return 0 + } + return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked()) +} + // tooIdleLocked reports whether this connection has been been sitting idle // for too much wall time. 
func (cc *ClientConn) tooIdleLocked() bool { @@ -1097,6 +1135,7 @@ func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() cc.tconn.Close() + cc.maybeCallStateHook() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -1118,6 +1157,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1144,7 +1184,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1156,9 +1196,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1168,7 +1208,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1206,7 +1246,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,8 +1255,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1233,23 +1272,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1261,22 +1283,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. 
@@ -1321,23 +1327,12 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1398,24 +1393,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1445,11 +1423,13 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1457,12 +1437,15 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. 
-func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true } // Acquire the new-request lock by writing to reqHeaderMu. @@ -1471,20 +1454,17 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported } - return true - }) + } } select { case cc.reqHeaderMu <- struct{}{}: @@ -1510,28 +1490,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. 
- // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1594,30 +1554,15 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. 
for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1659,26 +1604,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) - if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. - endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. 
// // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -1702,6 +1660,9 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil + // Have we read any frames from the connection since sending this request? + readSinceStream := cc.readBeforeStreamID > cs.ID cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1726,16 +1687,46 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // if we haven't read any frames from the connection since + // sending this request, we let it continue to consume + // a concurrency slot until we can confirm the server is + // still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle && !readSinceStream { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. 
+ if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1751,22 +1742,28 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } close(cs.donec) + cc.maybeCallStateHook() } // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2028,215 +2025,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() - } -} - -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -// requires cc.wmu be held. 
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). 
- f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. 
- for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. 
- // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false + cc.cond.Wait() } } @@ -2256,7 +2045,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2288,7 +2077,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2311,7 +2100,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2366,7 +2155,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2390,6 +2178,27 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. 
+ unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } + idleTime := time.Now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2399,8 +2208,15 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. + cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2433,10 +2249,10 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout - var t timer + readIdleTimeout := cc.readIdleTimeout + var t *time.Timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2447,7 +2263,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2499,7 +2315,7 @@ func (rl *clientConnReadLoop) run() error { } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, 
headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2587,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2595,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2617,15 +2433,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2700,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2809,7 +2644,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2944,9 +2779,23 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. 
+ rl.cc.rstStreamPingsBlocked = false + } + rl.cc.readBeforeStreamID = rl.cc.nextStreamID cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2997,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() @@ -3034,12 +2884,27 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3057,6 +2922,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3065,7 +2931,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3089,12 +2955,12 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3133,7 +2999,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3144,20 +3010,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil @@ -3174,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any @@ -3181,6 +3035,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. 
+ cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3203,20 +3063,27 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3263,35 +3130,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. 
type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close +} + +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr != nil { + return nil, gz.zerr + } + if gz.zr == nil { + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr + } + } + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil +} + +// release returns the gzip.Reader to the pool if Close was called during Read. 
+func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. +func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil + } + gz.zerr = fs.ErrClosed } func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr + zr, err := gz.acquire() + if err != nil { + return 0, err } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) + defer gz.release(zr) + + return zr.Read(p) } func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err - } - gz.zerr = fs.ErrClosed - return nil + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3317,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. +// if there's already a cached connection to the host. // (The field is exported so it can be accessed via reflect from net/http; tested // by TestNoDialH2RoundTripperType) +// +// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol, +// and the http1.Transport can use type assertions to call non-RoundTrip methods on it. +// This lets us expose, for example, NewClientConn to net/http. 
type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -3330,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err return res, err } +func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) { + tr := rt.Transport + cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook) + if err != nil { + return nil, err + } + + // RoundTrip should block when the conn is at its concurrency limit, + // not return an error. Setting strictMaxConcurrentStreams enables this. + cc.strictMaxConcurrentStreams = true + + return netHTTPClientConn{cc}, nil +} + +// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from +// the RoundTripper returned by NewClientConn. +type netHTTPClientConn struct { + cc *ClientConn +} + +func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.cc.RoundTrip(req) +} + +func (cc netHTTPClientConn) Close() error { + return cc.cc.Close() +} + +func (cc netHTTPClientConn) Err() error { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if cc.cc.closed { + return errors.New("connection closed") + } + return nil +} + +func (cc netHTTPClientConn) Reserve() error { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if !cc.cc.canReserveLocked() { + return errors.New("connection is unavailable") + } + cc.cc.streamsReserved++ + return nil +} + +func (cc netHTTPClientConn) Release() { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + // We don't complain if streamsReserved is 0. + // + // This is consistent with RoundTrip: both Release and RoundTrip will + // consume a reservation iff one exists. 
+ if cc.cc.streamsReserved > 0 { + cc.cc.streamsReserved-- + } +} + +func (cc netHTTPClientConn) Available() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.availableLocked() +} + +func (cc netHTTPClientConn) InFlight() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.currentRequestCountLocked() +} + +func (cc *ClientConn) maybeCallStateHook() { + if cc.internalStateHook != nil { + cc.internalStateHook() + } +} + func (t *Transport) idleConnTimeout() time.Duration { // to keep things backwards compatible, we use non-zero values of // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying @@ -3400,16 +3417,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 00000000..b2de2116 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. 
+// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..fdb35b94 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -131,6 +132,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { @@ -341,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). 
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index cc893adc..7de27be5 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). 
type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. 
func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} + } + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go similarity index 76% rename from vendor/golang.org/x/net/http2/writesched_priority.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index 0a242c66..4e33c29a 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -11,7 +11,7 @@ import ( ) // RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 +const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. type PriorityWriteSchedulerConfig struct { @@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler } } - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler return ws } -type priorityNodeState int +type priorityNodeStateRFC7540 int const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. 
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings + parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *priorityNode) setParent(parent *priorityNode) { +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) { } } -func (n *priorityNode) addBytes(b int64) { +func (n *priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). 
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) + openParent = openParent || (n.state == priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. @@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(sortPriorityNodeSiblings(*tmp)) + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f return false } -type sortPriorityNodeSiblings []*priorityNode +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. 
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type priorityWriteScheduler struct { +type priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root priorityNode + root priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode + nodes map[uint32]*priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -239,7 +239,7 @@ type priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode + closedNodes, idleNodes []*priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -248,19 +248,19 @@ type priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode + tmp []*priorityNodeRFC7540 // pool of empty queues for reuse. queuePool writeQueuePool } -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. 
if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { + if curr.state != priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = priorityNodeOpen + curr.state = priorityNodeOpenRFC7540 return } @@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream if parent == nil { parent = &ws.root } - n := &priorityNode{ + n := &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream } } -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != priorityNodeOpen { + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = priorityNodeClosed + n.state = priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { @@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) 
AdjustStream(streamID uint32, priority Priorit return } ws.maxID = streamID - n = &priorityNode{ + n = &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = priorityDefaultWeight + n.weight = priorityDefaultWeightRFC7540 return } @@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit n.weight = priority.Weight } -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode +func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) { + var n *priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { n.q.push(wr) } -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { +func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { return wr, ok } -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) { if maxSize == 0 { return } @@ -442,9 +441,9 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max *list = append(*list, n) } -func 
(ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) +func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) { + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go new file mode 100644 index 00000000..cb4cadc3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -0,0 +1,209 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type streamMetadata struct { + location *writeQueue + priority PriorityParam +} + +type priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]streamMetadata + + // queuePool are empty queues for reuse. + queuePool writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. 
+ prioritizeIncremental bool +} + +func newPriorityWriteSchedulerRFC9218() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. 
+ u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. 
+ if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. + ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe8632..737cff9e 100644 --- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 00000000..ed14da5a --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from vendor/golang.org/x/net/http2/headermap.go rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd2..92483d8e 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. +func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 00000000..1e10f89e --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersResult is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. 
+ if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. 
(We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). 
+ return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
+func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returning an error, + // but this ensures we don't forget to include a CountError reason. 
+ InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." + // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 00000000..2a7cf70d --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 00000000..f69fd754 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancellation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. 
+package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. A Group should not be reused for different tasks. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to Go must happen before a Wait. +// It blocks until the new goroutine can be added without the number of +// goroutines in the group exceeding the configured limit. +// +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. 
+func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. 
+func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e..6e08a76a 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. 
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81ac..3ea47038 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { + clear(s[:]) +} + +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { for i := range s { - s[i] = 0 + s[i] = ^cpuMask(0) } } diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 00000000..37a82528 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. 
+func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 00000000..1200487f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18c..62ed1264 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae..309f5a2b 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? 
For now we can provide helpers such as diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680ea..7ca4fa12 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. 
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. 
For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d37..d0ed6119 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488..fd39be4e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -157,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . 
#if defined(__powerpc__) @@ -215,6 +226,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -244,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -255,6 +268,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -337,6 +351,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' @@ -514,6 +531,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || @@ -526,6 +544,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -551,6 +570,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -594,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || @@ -654,7 +674,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ 
/^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -664,7 +684,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe52..3a5e776f 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..6f15ba1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a89..7838ca5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,144 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +const minIovec = 8 + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = preadv(fd, iovecs, offset) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwritev(fd, iovecs, offset) + writevRacedetect(iovecs, n) + return n, err +} + +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) + if len(b) > 0 { + v.Base = &b[0] + } else { + v.Base = (*byte)(unsafe.Pointer(&_zero)) + } + vecs = append(vecs, v) + } + return vecs +} + +func 
writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) @@ -644,3 +794,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys readv(fd int, iovecs []Iovec) (n int, err error) +//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (n int, err error) +//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f..be8c0020 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := 
F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f..a6a2d2fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e262..06c0eea6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "slices" "strconv" "syscall" "time" @@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { + for i := range n { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. 
@@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) psm[0] = byte(sa.PSM) psm[1] = byte(sa.PSM >> 8) - for i := 0; i < len(sa.Addr); i++ { + for i := range len(sa.Addr) { sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] } cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) @@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i] = rx[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+4] = tx[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil @@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) n := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Addr[i] = n[i] } p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+8] = p[i] } sa.raw.Addr[12] = sa.Addr @@ -800,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } @@ -911,7 +910,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { // These are EBCDIC encoded by the kernel, but we still need to pad them // with blanks. Initializing with blanks allows the caller to feed in either // a padded or an unpadded string. 
- for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Nodeid[i] = ' ' sa.raw.User_id[i] = ' ' sa.raw.Name[i] = ' ' @@ -1148,7 +1147,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { var user [8]byte var name [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { user[i] = byte(pp.User_id[i]) name[i] = byte(pp.Name[i]) } @@ -1173,11 +1172,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } name := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { name[i] = pp.Addr[i] } pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { pgn[i] = pp.Addr[i+8] } addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) @@ -1188,11 +1187,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { rx[i] = pp.Addr[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { tx[i] = pp.Addr[i+4] } return sa, nil @@ -1295,6 +1294,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1818,6 +1859,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err 
error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) @@ -2154,10 +2215,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2208,10 +2266,7 @@ func writevRacedetect(iovecs []Iovec, n int) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2258,12 +2313,7 @@ func isGroupMember(gid int) bool { return false } - for _, g := range groups { - if g == gid { - return true - } - } - return false + return slices.Contains(groups, gid) } func isCapDacOverrideSet() bool { @@ -2592,3 +2642,10 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c7..745e5c7e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e9845..dd2262a4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a2889..8cf3670b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099..34a46769 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c7..b86ded54 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys 
Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af0..18a3d9bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. 
+type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. +func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8..4e92e5aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := 
mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac..7bf5c04b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = 
SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. 
+ if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 00000000..07ac8e09 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go similarity index 56% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go119.go rename to vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go index f65beed9..297e97bc 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -2,13 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.19 -// +build go1.19 +//go:build !linux || !go1.24 -package versions +package unix -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa852..d73c4652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c..4a55a400 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 
RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b4..120a7b35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,11 +319,17 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -457,6 +463,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -487,13 +494,16 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -521,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -548,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 
BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -837,24 +849,90 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -928,12 +1006,12 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 
0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -941,9 +1019,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1143,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1166,6 +1251,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1198,13 +1284,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1219,9 +1310,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + 
FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1235,12 +1329,15 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1260,6 +1357,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1325,8 +1423,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1515,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c @@ -1546,6 +1648,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1565,7 +1668,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1616,8 +1718,9 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1676,7 +1779,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - 
IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1705,6 +1807,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 +1883,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1796,7 +1900,13 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1858,9 +1968,23 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -1908,6 +2032,8 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 
0x2 @@ -1943,6 +2069,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2059,6 +2186,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2139,6 +2267,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2173,7 +2302,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2225,7 +2354,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = 
"LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + 
NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2342,9 +2631,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2410,6 +2701,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d 
+ PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2417,6 +2761,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2448,6 +2793,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2464,6 +2813,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2472,6 +2822,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2498,6 +2849,25 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + 
PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2548,6 +2918,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2558,6 +2929,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2582,6 +2956,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2589,6 +2967,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2640,6 +3040,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO 
= 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2648,6 +3049,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -2703,7 +3121,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2780,10 +3198,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2833,11 +3253,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2845,7 +3267,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2854,6 +3275,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f @@ -2886,6 +3308,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 
RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2902,15 +3325,18 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 @@ -2973,6 +3399,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3179,11 +3646,13 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3192,9 +3661,21 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + 
STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3233,7 +3714,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3303,8 +3784,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3414,6 +3893,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3426,14 +3906,14 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf @@ -3470,7 +3950,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3584,6 +4064,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 @@ -3592,6 +4073,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd5..97a61fc5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,12 +110,17 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -151,9 +159,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -230,6 +243,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e 
PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -276,10 +303,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -314,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -330,6 +363,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -342,6 +376,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317af..a0d6d498 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,12 +110,17 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + 
IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -151,9 +159,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -230,6 +243,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -277,10 +304,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -315,6 +345,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -331,6 +364,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -343,6 +377,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 
SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668a..dd9c903f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 
0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -282,10 +309,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -320,6 +350,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -336,6 +369,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -348,6 +382,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508..384c61ca 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -107,15 +110,21 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 
0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -152,9 +161,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -198,6 +212,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -233,6 +248,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -273,10 +302,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -311,6 +343,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + 
SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -327,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -339,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afd..6384c983 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,12 +110,17 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -152,9 +160,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -231,6 +244,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 
0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -269,10 +296,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -307,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -323,6 +356,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -335,6 +369,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92c..553c1c6f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + 
EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +302,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +343,9 @@ 
const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba..b3339f20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 
0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +302,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +343,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0..177091d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 
CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +302,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 
0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +343,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e..c5abf156 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 
NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +302,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +343,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +362,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 
60b0deb3..f1f3fadf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -150,9 +158,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +243,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -330,10 +357,13 @@ 
const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -368,6 +398,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -384,6 +417,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -396,6 +430,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa728..203ad9c5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF 
= 0x400 @@ -150,9 +158,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +243,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,10 +361,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -372,6 +402,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -388,6 +421,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -400,6 +434,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e0150..4b9abcb2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -150,9 +158,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +243,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 
0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,10 +361,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -372,6 +402,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -388,6 +421,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -400,6 +434,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e..f8798303 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -266,10 +293,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -304,6 +334,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -320,6 +353,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -332,6 +366,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 
0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214..64347eb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,17 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +156,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + 
PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -338,10 +365,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -376,6 +406,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -392,6 +425,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -404,6 +438,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d9..7d719117 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -82,6 +83,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -110,12 +113,17 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 
IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -153,9 +161,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +245,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 @@ -329,10 +356,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -415,6 +445,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -431,6 +464,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS 
= 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -443,6 +477,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f24..813c05b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := 
syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), 
uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -2411,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if 
len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284..fda32858 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL 
·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 @@ -713,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) 
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997..e6f58f3c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func 
sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr 
unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -2411,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1a..7f8998b9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL 
·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 @@ -713,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a..8935d10a 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2222,29 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410..1851df14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b56173..0b43c693 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT 
libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751..e1ec0dbe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656..880c6d6e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, 
$libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776..7c8452a6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f..b8ef95b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA 
·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d07..2ffdf861 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d5321..2af3b5c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL 
·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e6..1da08d52 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b6..b7a25135 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT 
libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354..6e85b0aa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff0..f15dadf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed5..28b487df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a..1e7f321e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s 
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87fe..b4609c20 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic 
libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -211,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -351,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + 
procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -1148,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc..aca56ee4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d52476..2ea1ef58 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 +380,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c7477061..d22c8af3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f..5ee264ae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346..f9f03ebf 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018..87c2118e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b..391ad102 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca..56561577 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + 
SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c3..0482b52e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc4..71806f08 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b..e35a7105 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e1635..2aea4767 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ 
-420,4 +420,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9..6c9bb4e5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad..680bc991 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d7..620f2710 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff 
--git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..17c53bd9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx 
RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..2392226a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + 
Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ 
-612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b834..c1a46701 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,37 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -194,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -515,6 +523,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type 
TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +587,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -600,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -657,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -708,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -1723,12 +1767,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1767,6 +1805,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1796,6 +1835,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1828,6 +1869,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1896,6 +1938,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 
IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1948,6 +1991,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2189,8 +2241,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2277,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2485,7 +2545,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -2557,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3004,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3038,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 
0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -3473,7 +3562,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3501,13 +3590,17 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -3765,7 +3858,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,12 +3907,25 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 
0x30 + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -3909,7 +4024,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3947,7 +4067,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4095,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -3991,11 +4113,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 
ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4061,6 +4183,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4078,6 +4213,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + 
PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4259,6 +4495,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { @@ -4471,6 +4708,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4481,6 +4719,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4540,6 +4779,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4575,6 +4815,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4605,9 +4846,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4632,9 +4874,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a 
NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4667,12 +4912,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4801,7 +5048,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4836,6 +5085,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -4859,6 +5110,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -4890,6 +5145,9 @@ const ( NL80211_BSS_STATUS = 0x9 
NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -4975,7 +5233,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5019,6 +5278,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5038,6 +5298,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5105,6 +5366,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5120,6 +5382,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5139,9 +5402,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + 
NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5153,8 +5419,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5201,7 +5469,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5209,12 +5480,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5222,8 +5495,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 
NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5288,6 +5564,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5316,9 +5593,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5377,7 +5655,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -5561,11 +5839,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5611,6 +5894,8 @@ const 
( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5628,14 +5913,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5646,7 +5936,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5707,6 +6000,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -5865,6 +6159,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 
NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -5896,6 +6197,7 @@ const ( NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc @@ -6032,3 +6334,32 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da4..485f2d3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 
+ Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e18..ecbd1ad8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108..02f0463a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f..6f4d400d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26..cd532cfa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -331,6 +331,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2..41336208 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte 
Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 8596d453..eaa37eb7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea18..98ae6a1e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + 
Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c..cae19615 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 83597287..6ce3b4e0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,7 +285,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -341,6 +341,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + 
Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c..c7429c6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb6..4bf4baf4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..e9709d70 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -358,6 +358,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 @@ -727,6 +743,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + 
RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +781,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce900..fb44268c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -353,6 +353,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b5673..9c38265c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -335,6 +335,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + 
Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec..50e8e644 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af4..2e5d5a44 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fb..3ca814f5 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. 
func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac7..a8b0364c 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { @@ -1281,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE return nil, err } if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + absoluteSD = new(SECURITY_DESCRIPTOR) + if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) { + panic("sizeof(SECURITY_DESCRIPTOR) too small") + } } var ( dacl *ACL @@ -1290,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE group *SID ) if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize)))) } if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize)))) } if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize)))) } if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize)))) } + // We call into Windows via makeAbsoluteSD, which sets up + // pointers within absoluteSD that point to other chunks of memory + // we pass into makeAbsoluteSD, and that happens outside 
the view of the GC. + // We therefore take some care here to then verify the pointers are as we expect + // and set them explicitly in view of the GC. See https://go.dev/issue/73199. + // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575. err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + if err != nil { + // Don't return absoluteSD, which might be partially initialized. + return nil, err + } + // Before using any fields, verify absoluteSD is in the format we expect according to Windows. + // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors + absControl, _, err := absoluteSD.Control() + if err != nil { + panic("absoluteSD: " + err.Error()) + } + if absControl&SE_SELF_RELATIVE != 0 { + panic("absoluteSD not in absolute format") + } + if absoluteSD.dacl != dacl { + panic("dacl pointer mismatch") + } + if absoluteSD.sacl != sacl { + panic("sacl pointer mismatch") + } + if absoluteSD.owner != owner { + panic("owner pointer mismatch") + } + if absoluteSD.group != group { + panic("group pointer mismatch") + } + absoluteSD.dacl = dacl + absoluteSD.sacl = sacl + absoluteSD.owner = owner + absoluteSD.group = group + return } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f..69439df2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -166,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle 
Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -211,6 +215,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = 
user32.ExitWindowsEx @@ -307,8 +315,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -715,20 +729,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e - } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + 
info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -866,6 +872,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo //sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -884,6 +891,15 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 +//sys 
GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -904,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. 
+type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -1368,9 +1395,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } @@ -1673,19 +1702,23 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - slice := unsafe.Slice(s.Buffer, s.MaximumLength) - return slice[:s.Length] + // Note: this rounds the length down, if it happens + // to (incorrectly) be odd. Probably safer than rounding up. 
+ return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2] } func (s *NTUnicodeString) String() string { diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db..6e4f50eb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -176,6 +192,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. + PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -1060,6 +1077,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -1072,6 +1090,7 @@ const ( IP_ADD_MEMBERSHIP = 0xc IP_DROP_MEMBERSHIP = 0xd IP_PKTINFO = 0x13 + IP_MTU_DISCOVER = 0x47 IPV6_V6ONLY = 0x1b IPV6_UNICAST_HOPS = 0x4 @@ -1081,6 +1100,7 @@ const ( IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_PKTINFO = 0x13 + IPV6_MTU_DISCOVER = 0x47 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -1130,6 +1150,15 @@ const ( WSASYS_STATUS_LEN = 128 ) +// enum PMTUD_STATE from ws2ipdef.h +const ( + IP_PMTUDISC_NOT_SET = 0 + IP_PMTUDISC_DO = 1 + IP_PMTUDISC_DONT = 2 + IP_PMTUDISC_PROBE = 3 + IP_PMTUDISC_MAX = 4 +) + type WSABuf struct { Len uint32 Buf *byte @@ -1144,6 +1173,22 @@ type WSAMsg struct { Flags uint32 } +type WSACMSGHDR struct { + Len uintptr + Level int32 + Type int32 +} + +type IN_PKTINFO struct { + Addr [4]byte + Ifindex uint32 +} + +type IN6_PKTINFO struct { + Addr [16]byte + Ifindex uint32 +} + // Flags for WSASocket const ( WSA_FLAG_OVERLAPPED = 0x01 @@ -1947,6 +1992,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 @@ -2003,7 +2054,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. 
+const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2082,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -2144,6 +2253,208 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// 
https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. 
+type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. +type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. 
+func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to 
SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. @@ -2487,6 +2798,8 @@ type CommTimeouts struct { // NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. type NTUnicodeString struct { + // Note: Length and MaximumLength are in *bytes*, not uint16s. + // They should always be even. Length uint16 MaximumLength uint16 Buffer *uint16 @@ -3404,3 +3717,224 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. +// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) + +// Virtual Key codes +// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_IME_ON = 0x16 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_IME_OFF = 0x1A + VK_ESCAPE = 0x1B + VK_CONVERT = 0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + 
VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 
+ VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Mouse button constants. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001 + RIGHTMOST_BUTTON_PRESSED = 0x0002 + FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004 + FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008 + FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010 +) + +// Control key state constaints. +// https://docs.microsoft.com/en-us/windows/console/key-event-record-str +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 + LEFT_ALT_PRESSED = 0x0002 + LEFT_CTRL_PRESSED = 0x0008 + NUMLOCK_ON = 0x0020 + RIGHT_ALT_PRESSED = 0x0001 + RIGHT_CTRL_PRESSED = 0x0004 + SCROLLLOCK_ON = 0x0040 + SHIFT_PRESSED = 0x0010 +) + +// Mouse event record event flags. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + MOUSE_MOVED = 0x0001 + DOUBLE_CLICK = 0x0002 + MOUSE_WHEELED = 0x0004 + MOUSE_HWHEELED = 0x0008 +) + +// Input Record Event Types +// https://learn.microsoft.com/en-us/windows/console/input-record-str +const ( + FOCUS_EVENT = 0x0010 + KEY_EVENT = 0x0001 + MENU_EVENT = 0x0008 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75..f25b7308 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = 
modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -180,10 +181,19 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -232,6 +242,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -246,7 +257,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = 
modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -272,8 +285,11 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -346,8 +362,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = 
modkernel32.NewProc("SetDllDirectoryW") @@ -477,12 +495,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -495,6 +517,7 @@ var ( procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW") procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -529,25 +552,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), 
uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -557,7 +580,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), 
uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -569,7 +592,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -577,7 +600,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -585,7 +608,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries 
*EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -593,7 +616,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -601,7 +624,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := 
syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -609,7 +632,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -617,7 +640,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -625,7 +648,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, 
uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -633,7 +656,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -641,7 +664,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +681,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -666,7 +689,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -674,7 +697,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -686,7 +709,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), 
uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -694,7 +717,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -703,7 +726,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +734,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -719,7 +742,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -727,7 +750,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -735,7 +758,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) 
+ r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -743,7 +766,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -751,7 +774,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -759,7 +782,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = 
errnoErr(e1) } @@ -767,7 +790,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -775,21 +798,29 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { 
+ err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -804,7 +835,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -812,7 +843,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -828,7 +859,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - 
r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -842,7 +873,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -851,7 +882,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -861,7 +892,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -870,7 +901,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, 
ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -886,7 +917,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -896,7 +927,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -904,25 +935,25 @@ func getSecurityInfo(handle Handle, objectType 
SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -930,7 +961,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -938,7 +969,7 @@ func 
ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -954,7 +985,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -962,7 +993,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -971,25 +1002,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := 
syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -997,7 +1028,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1005,7 +1036,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1013,7 +1044,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), 
uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1021,7 +1052,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1029,7 +1060,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1037,7 +1068,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1045,7 +1076,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1054,7 +1085,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1067,7 +1098,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1075,7 +1106,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1083,7 +1114,7 @@ func 
QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1095,7 +1126,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1103,7 +1134,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1111,7 +1142,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := 
syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1119,7 +1150,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1127,7 +1158,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1135,7 +1166,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = 
syscall.Errno(r0) } @@ -1151,7 +1182,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1159,7 +1190,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1167,7 +1198,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ 
:= syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1175,7 +1206,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1183,7 +1214,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1192,7 +1223,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, 
context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1201,7 +1232,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1209,7 +1240,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1217,7 +1248,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), 
uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1225,7 +1256,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1273,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1281,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1266,7 +1297,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1278,7 +1309,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1290,7 +1321,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1298,7 +1329,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - 
syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1311,7 +1342,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1319,7 +1350,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1327,7 +1358,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), 
uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1335,7 +1366,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1343,7 +1374,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1351,7 +1382,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1359,7 +1390,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1367,7 +1398,7 @@ func 
StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1375,7 +1406,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1383,7 +1414,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1392,7 +1423,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,13 +1431,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1415,7 +1446,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1424,7 +1455,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func 
CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1433,18 +1464,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1452,7 +1483,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, 
time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1460,13 +1491,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1475,7 +1506,7 @@ func 
CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1484,7 +1515,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1496,7 +1527,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1505,7 +1536,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func 
CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1513,7 +1544,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1552,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err 
error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1529,7 +1560,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1537,7 +1568,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1546,7 +1577,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1561,7 +1592,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1569,12 +1600,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := 
syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1582,15 +1613,28 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1598,7 +1642,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := 
syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1606,7 +1650,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1614,7 +1658,75 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := 
syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1622,7 +1734,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1631,7 +1743,7 @@ func AddDllDirectory(path *uint16) (cookie 
uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1639,7 +1751,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1647,7 +1759,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1655,7 +1767,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1663,7 +1775,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1671,7 +1783,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 
1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1679,12 +1791,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1692,7 +1804,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1700,7 +1812,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1709,7 +1821,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, 
manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1718,7 +1830,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1727,7 +1839,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == 
InvalidHandle { err = errnoErr(e1) @@ -1736,7 +1848,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1744,7 +1856,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1753,7 +1865,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1762,7 +1874,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, 
uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1775,7 +1887,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1784,7 +1896,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1793,7 +1905,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, 
uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1805,7 +1917,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1813,7 +1925,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1821,7 +1933,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func 
CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1829,7 +1941,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1838,7 +1950,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1846,7 +1958,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1854,12 +1966,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - 
syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1867,7 +1979,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1875,7 +1987,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1887,7 +1999,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := 
syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1895,7 +2007,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1903,12 +2015,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1917,7 +2029,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1925,7 +2037,7 @@ func 
FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1946,7 +2058,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1955,7 +2067,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1964,7 +2076,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1973,7 +2085,7 @@ func 
FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1982,7 +2094,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1990,7 +2102,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -1998,7 +2110,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2006,7 +2118,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume 
Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2014,7 +2126,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2023,7 +2135,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2031,7 +2143,15 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) if r1 == 0 { err = errnoErr(e1) } @@ -2039,7 +2159,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2047,7 +2167,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2059,7 +2179,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2068,7 +2188,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2076,7 +2196,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2084,7 +2204,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID 
uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2092,19 +2212,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2112,7 +2232,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2120,7 +2240,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = 
errnoErr(e1) } @@ -2128,13 +2248,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2142,23 +2262,41 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := 
syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2166,7 +2304,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2175,19 +2313,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2195,13 +2333,13 @@ func GetDiskFreeSpaceEx(directoryName 
*uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2210,7 +2348,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2219,7 +2357,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2227,7 +2365,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), 
uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2235,7 +2373,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2244,7 +2382,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2252,7 +2390,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2260,7 +2398,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2268,7 +2406,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2277,7 +2415,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2286,7 +2424,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2295,13 +2433,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := 
syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2309,7 +2447,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2318,7 +2456,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2327,7 +2465,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2336,13 +2474,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func 
GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2351,7 +2489,15 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2359,7 +2505,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), 
uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2367,7 +2513,23 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) if r1 == 0 { err = errnoErr(e1) } @@ -2379,7 +2541,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2387,7 +2549,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done 
*uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2405,7 +2567,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2414,7 +2576,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2423,7 +2585,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2431,7 +2593,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2439,7 +2601,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2447,12 +2609,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), 
uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2460,7 +2622,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2469,12 +2631,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2483,7 +2645,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2492,7 +2654,7 @@ func getSystemDirectory(dir *uint16, dirLen 
uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2500,17 +2662,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2519,7 +2681,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2528,7 
+2690,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2536,13 +2698,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2551,7 +2713,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2559,7 +2721,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf 
*uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2568,7 +2730,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2576,7 +2738,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), 
uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2584,7 +2746,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2592,7 +2754,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2600,7 +2762,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2608,7 +2770,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2617,7 +2779,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2629,7 +2791,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2642,7 +2804,7 @@ func IsWow64Process2(handle 
Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2659,7 +2821,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2677,7 +2839,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2686,7 +2848,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2695,7 +2857,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), 
uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2704,7 +2866,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2713,7 +2875,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2721,7 +2883,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2730,7 +2892,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), 
uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2739,7 +2901,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2747,7 +2909,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2755,7 +2917,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2763,7 +2925,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2771,7 +2933,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, 
dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2784,7 +2946,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2797,7 +2959,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2810,7 +2972,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2823,7 +2985,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := 
syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2832,7 +2994,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2840,7 +3002,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2848,7 +3010,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2856,7 +3018,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), 
uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2864,7 +3026,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2872,7 +3034,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2880,7 +3042,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2889,7 +3051,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2897,7 +3059,7 @@ func 
QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -2905,7 +3067,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -2917,7 +3079,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), 
uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -2929,7 +3091,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2937,7 +3099,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -2945,7 +3107,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -2953,7 +3115,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -2961,7 +3123,7 @@ func 
RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -2969,7 +3131,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2977,7 +3139,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -2985,7 +3147,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -2994,7 +3156,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3002,7 +3164,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 
== 0 { err = errnoErr(e1) } @@ -3010,7 +3172,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3018,7 +3180,15 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3026,7 +3196,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3034,7 +3204,15 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3042,7 +3220,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3050,7 +3228,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3067,7 +3245,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3075,7 +3253,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3083,7 +3261,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3091,13 +3269,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret 
uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3105,7 +3283,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3113,7 +3291,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3121,7 +3299,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3129,7 +3307,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence 
uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3138,7 +3316,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3146,7 +3324,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3154,7 +3332,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3162,7 +3340,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func 
SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3171,7 +3349,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3179,7 +3357,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3191,7 +3369,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), 
uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3199,7 +3377,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3207,7 +3385,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3215,7 +3393,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3223,7 +3401,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ 
-3231,7 +3409,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3239,7 +3417,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3247,7 +3425,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3260,13 +3438,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3274,7 +3452,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3282,7 +3460,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3290,7 +3468,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3298,7 +3476,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3306,7 +3484,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func 
UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3314,7 +3492,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3322,7 +3500,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3331,7 +3509,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3339,7 +3517,7 @@ func 
VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3347,7 +3525,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3355,7 +3533,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3363,7 +3541,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3371,7 +3549,7 @@ 
func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3379,7 +3557,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3387,13 +3565,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3405,7 +3583,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := 
syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3414,7 +3592,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3423,7 +3601,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3435,7 +3613,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3443,7 +3621,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process 
Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3451,7 +3629,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3459,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), 
uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3472,7 +3650,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint } func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3480,7 +3658,7 @@ func NetApiBufferFree(buf *byte) (neterr error) { } func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3488,7 +3666,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete } func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) 
(neterr error) { - r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3496,7 +3674,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr } func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3504,7 +3682,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by } func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), 
uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3512,7 +3690,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO } func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3520,7 +3698,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i } func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + r0, _, _ := 
syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3528,7 +3706,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe } func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3536,7 +3714,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf } func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3544,7 +3722,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, } func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), 
uintptr(procInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3552,7 +3730,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P } func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3560,13 +3738,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL } func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) ret = r0 != 0 return } func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3574,13 +3752,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { } func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable))) ret = r0 != 0 return } func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := 
syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3588,7 +3766,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile } func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3596,18 +3774,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString } func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr()) peb = (*PEB)(unsafe.Pointer(r0)) return } func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), 
uintptr(unsafe.Pointer(buildNumber))) return } func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3615,23 +3793,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { } func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus)) ret = syscall.Errno(r0) return } func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3639,7 +3817,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { } func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 
0) + r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3647,7 +3825,7 @@ func coCreateGuid(pguid *GUID) (ret error) { } func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3655,7 +3833,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable * } func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { - r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3663,23 +3841,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { } func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address)) return } func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + syscall.SyscallN(procCoUninitialize.Addr()) return } func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) chars = int32(r0) return } func EnumProcessModules(process Handle, module *Handle, cb 
uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -3687,7 +3865,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin } func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag)) if r1 == 0 { err = errnoErr(e1) } @@ -3695,7 +3873,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u } func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3703,7 +3881,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err } func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), 
uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3711,7 +3889,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin } func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3719,7 +3897,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u } func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3727,7 +3905,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb } func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3739,7 +3917,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb if ret != nil { return } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + r0, _, _ := 
syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3751,12 +3929,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { if err != nil { return } - syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription)) return } func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3764,7 +3942,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er } func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3772,7 +3950,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint } func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, 
uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3780,7 +3958,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3788,7 +3966,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf } func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3796,7 +3974,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { } func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), 
uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3804,7 +3982,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu } func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3812,7 +3990,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz } func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3821,7 +3999,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN } func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err 
error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3829,7 +4007,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI } func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3837,7 +4015,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { } func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3845,7 +4023,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + 
r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3853,7 +4031,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo } func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3861,7 +4039,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3870,7 +4048,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData 
*DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -3878,7 +4056,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -3886,7 +4064,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -3894,7 +4072,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } 
func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -3902,7 +4080,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3910,7 +4088,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType 
*uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -3918,7 +4096,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -3926,7 +4104,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3934,7 +4112,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3942,7 +4120,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -3951,7 +4129,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -3959,7 +4137,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -3967,7 +4145,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -3975,7 +4153,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } 
func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3983,7 +4161,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3991,7 +4169,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3999,7 +4177,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = 
(**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4008,7 +4186,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4016,7 +4194,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4024,12 +4202,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ 
-4037,7 +4215,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4045,7 +4223,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4054,33 +4232,39 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := 
syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4089,25 +4273,34 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 != 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4115,12 +4308,26 @@ func 
MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4128,7 +4335,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4136,7 +4343,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4153,7 +4360,7 @@ func 
GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4171,7 +4378,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4188,7 +4395,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4196,7 +4403,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := 
syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4204,7 +4411,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4212,7 +4419,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4220,12 +4427,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo))) return } func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4233,15 +4440,23 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := 
syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } return } +func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4254,7 +4469,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4262,7 +4477,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := 
syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4270,7 +4485,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4278,7 +4493,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4286,7 +4501,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4294,7 +4509,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, 
croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4302,7 +4517,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4310,7 +4525,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), 
uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4318,7 +4533,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4326,7 +4541,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4335,7 +4550,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) 
(sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4343,7 +4558,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4351,7 +4566,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4359,7 +4574,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4376,7 +4591,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4385,7 +4600,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), 
uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4402,7 +4617,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4425,7 +4640,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4434,7 +4649,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4442,7 +4657,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := 
syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4450,7 +4665,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4458,7 +4673,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4468,7 +4683,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4481,7 +4696,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4489,7 +4704,7 @@ func sendto(s Handle, buf []byte, flags int32, to 
unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4497,7 +4712,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4505,7 +4720,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4514,7 +4729,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4522,12 +4737,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version 
uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547..fb827323 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f49..563270c1 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -185,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -194,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) { // update state r.lim.last = t r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { + if r.timeToAct.Equal(r.lim.lastEvent) { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent @@ -306,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -323,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -344,21 +345,9 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return 
Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -391,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -407,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -416,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. 
+ if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go index 6ba99ddb..9b839326 100644 --- a/vendor/golang.org/x/time/rate/sometimes.go +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) { (s.Every > 0 && s.count%s.Every == 0) || (s.Interval > 0 && time.Since(s.last) >= s.Interval) { f() - s.last = time.Now() + if s.Interval > 0 { + s.last = time.Now() + } } s.count++ } diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 2c4c4e23..0fb4e7ee 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // Does augmented child strictly contain [start, end)? if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) } // Does [start, end) overlap multiple children? @@ -194,6 +207,9 @@ func childrenOf(n ast.Node) []ast.Node { return false // no recursion }) + // TODO(adonovan): be more careful about missing (!Pos.Valid) + // tokens in trees produced from invalid input. + // Then add fake Nodes for bare tokens. switch n := n.(type) { case *ast.ArrayType: @@ -213,9 +229,12 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, tok(n.OpPos, len(n.Op.String()))) case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) + if n.Lbrace.IsValid() { + children = append(children, tok(n.Lbrace, len("{"))) + } + if n.Rbrace.IsValid() { + children = append(children, tok(n.Rbrace, len("}"))) + } case *ast.BranchStmt: children = append(children, @@ -291,9 +310,12 @@ func childrenOf(n ast.Node) []ast.Node { // TODO(adonovan): Field.{Doc,Comment,Tag}? 
case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), // or len("[") - tok(n.Closing, len(")"))) // or len("]") + if n.Opening.IsValid() { + children = append(children, tok(n.Opening, len("("))) + } + if n.Closing.IsValid() { + children = append(children, tok(n.Closing, len(")"))) + } case *ast.File: // TODO test: Doc @@ -313,6 +335,8 @@ func childrenOf(n ast.Node) []ast.Node { // // As a workaround, we inline the case for FuncType // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. // children = nil // discard ast.Walk(FuncDecl) info subtrees children = append(children, tok(n.Type.Func, len("func"))) @@ -632,3 +656,8 @@ func NodeDescription(n ast.Node) string { } panic(fmt.Sprintf("unexpected node type: %T", n)) } + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 18d1adb0..5bacc0fa 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "slices" "strconv" "strings" ) @@ -186,7 +187,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } @@ -208,48 +209,46 @@ func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) // DeleteNamedImport deletes the import with the given name and path from the file f, if present. // If there are duplicate import declarations, all matching ones are deleted. 
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup + var ( + delspecs = make(map[*ast.ImportSpec]bool) + delcomments = make(map[*ast.CommentGroup]bool) + ) // Find the import nodes that import path, if any. for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) + gen, ok := f.Decls[i].(*ast.GenDecl) if !ok || gen.Tok != token.IMPORT { continue } for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) + impspec := gen.Specs[j].(*ast.ImportSpec) if importName(impspec) != name || importPath(impspec) != path { continue } // We found an import spec that imports path. // Delete it. - delspecs = append(delspecs, impspec) + delspecs[impspec] = true deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] + gen.Specs = slices.Delete(gen.Specs, j, j+1) // If this was the last import spec in this decl, // delete the decl, too. if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] + f.Decls = slices.Delete(f.Decls, i, i+1) i-- break } else if len(gen.Specs) == 1 { if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) + delcomments[impspec.Doc] = true } if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) + delcomments[impspec.Comment] = true } for _, cg := range f.Comments { // Found comment on the same line as the import spec. if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) + delcomments[cg] = true break } } @@ -293,38 +292,21 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del } // Delete imports from f.Imports. 
- for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } + before := len(f.Imports) + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + _, ok := delspecs[imp] + return ok + }) + if len(f.Imports)+len(delspecs) != before { + // This can happen when the AST is invalid (i.e. imports differ between f.Decls and f.Imports). + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) } // Delete comments from f.Comments. - for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } + f.Comments = slices.DeleteFunc(f.Comments, func(cg *ast.CommentGroup) bool { + _, ok := delcomments[cg] + return ok + }) return } @@ -344,7 +326,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r } // UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). 
func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } spec := importSpec(f, path) if spec == nil { return diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 58934f76..4ad05493 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -67,6 +67,10 @@ var abort = new(int) // singleton, to signal termination of Apply // // The methods Replace, Delete, InsertBefore, and InsertAfter // can be used to change the AST without disrupting Apply. +// +// This type is not to be confused with [inspector.Cursor] from +// package [golang.org/x/tools/go/ast/inspector], which provides +// stateless navigation of immutable syntax trees. type Cursor struct { parent ast.Node name string @@ -183,7 +187,7 @@ type application struct { func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { n = nil } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 919d5305..c820b208 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,12 +7,7 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} +// Deprecated: use [ast.Unparen]. 
+// +//go:fix inline +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go index dfb8cd6c..8a7f0fcc 100644 --- a/vendor/golang.org/x/tools/go/buildutil/allpackages.go +++ b/vendor/golang.org/x/tools/go/buildutil/allpackages.go @@ -52,7 +52,6 @@ func ForEachPackage(ctxt *build.Context, found func(importPath string, err error var wg sync.WaitGroup for _, root := range ctxt.SrcDirs() { - root := root wg.Add(1) go func() { allPackages(ctxt, root, ch) @@ -107,7 +106,6 @@ func allPackages(ctxt *build.Context, root string, ch chan<- item) { ch <- item{pkg, err} } for _, fi := range files { - fi := fi if fi.IsDir() { wg.Add(1) go func() { @@ -177,7 +175,7 @@ func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { for _, pkg := range all { doPkg(pkg, neg) } - } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { + } else if dir, ok := strings.CutSuffix(arg, "/..."); ok { // dir/... 
matches all packages beneath dir for _, pkg := range all { if strings.HasPrefix(pkg, dir) && diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go index 763d1880..1f75141d 100644 --- a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -95,7 +95,7 @@ func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } type fakeFileInfo string func (fi fakeFileInfo) Name() string { return string(fi) } -func (fakeFileInfo) Sys() interface{} { return nil } +func (fakeFileInfo) Sys() any { return nil } func (fakeFileInfo) ModTime() time.Time { return time.Time{} } func (fakeFileInfo) IsDir() bool { return false } func (fakeFileInfo) Size() int64 { return 0 } @@ -104,7 +104,7 @@ func (fakeFileInfo) Mode() os.FileMode { return 0644 } type fakeDirInfo string func (fd fakeDirInfo) Name() string { return string(fd) } -func (fakeDirInfo) Sys() interface{} { return nil } +func (fakeDirInfo) Sys() any { return nil } func (fakeDirInfo) ModTime() time.Time { return time.Time{} } func (fakeDirInfo) IsDir() bool { return true } func (fakeDirInfo) Size() int64 { return 0 } diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go index 32c8d142..f66cd5df 100644 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -43,7 +43,7 @@ func (v *TagsFlag) Set(s string) error { // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. 
*v = []string{} - for _, s := range strings.Split(s, ",") { + for s := range strings.SplitSeq(s, ",") { if s != "" { *v = append(*v, s) } @@ -51,7 +51,7 @@ func (v *TagsFlag) Set(s string) error { return nil } -func (v *TagsFlag) Get() interface{} { return *v } +func (v *TagsFlag) Get() any { return *v } func splitQuotedFields(s string) ([]string, error) { // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go index 697974bb..735efeb5 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -203,7 +203,7 @@ func envList(key, def string) []string { // stringList's arguments should be a sequence of string or []string values. // stringList flattens them into a single []string. -func stringList(args ...interface{}) []string { +func stringList(args ...any) []string { var x []string for _, arg := range args { switch arg := arg.(type) { diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go index e35b1fd7..769a1fcf 100644 --- a/vendor/golang.org/x/tools/go/loader/doc.go +++ b/vendor/golang.org/x/tools/go/loader/doc.go @@ -164,7 +164,7 @@ package loader // entry is created in this cache by startLoad the first time the // package is imported. The first goroutine to request an entry becomes // responsible for completing the task and broadcasting completion to -// subsequent requestors, which block until then. +// subsequent requesters, which block until then. 
// // Type checking occurs in (parallel) postorder: we cannot type-check a // set of files until we have loaded and type-checked all of their diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go index 013c0f50..9c5f7db1 100644 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -23,7 +23,6 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/internal/cgo" - "golang.org/x/tools/internal/versions" ) var ignoreVendor build.ImportMode @@ -216,7 +215,7 @@ func (conf *Config) fset() *token.FileSet { // src specifies the parser input as a string, []byte, or io.Reader, and // filename is its apparent name. If src is nil, the contents of // filename are read from the file system. -func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { +func (conf *Config) ParseFile(filename string, src any) (*ast.File, error) { // TODO(adonovan): use conf.build() etc like parseFiles does. return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) } @@ -341,13 +340,7 @@ func (conf *Config) addImport(path string, tests bool) { func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { for _, info := range prog.AllPackages { for _, f := range info.Files { - if f.Pos() == token.NoPos { - // This can happen if the parser saw - // too many errors and bailed out. - // (Use parser.AllErrors to prevent that.) 
- continue - } - if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { + if !tokenFileContainsPos(prog.Fset.File(f.FileStart), start) { continue } if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { @@ -1029,18 +1022,18 @@ func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { info := &PackageInfo{ Pkg: pkg, Info: types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), }, errorFunc: imp.conf.TypeChecker.Error, dir: dir, } - versions.InitFileVersions(&info.Info) // Copy the types.Config so we can vary it across PackageInfos. tc := imp.conf.TypeChecker diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go new file mode 100644 index 00000000..5d120d07 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. 
+package imports // import "golang.org/x/tools/imports" + +import ( + "log" + "os" + + "golang.org/x/tools/internal/gocommand" + intimp "golang.org/x/tools/internal/imports" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Debug controls verbose logging. +var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used, and if src is nil the source +// is read from the filesystem. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data “as if” it were in filename, pass the data as a non-nil src. 
+func Process(filename string, src []byte, opt *Options) ([]byte, error) { + var err error + if src == nil { + src, err = os.ReadFile(filename) + if err != nil { + return nil, err + } + } + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + intopt := &intimp.Options{ + Env: &intimp.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + LocalPrefix: LocalPrefix, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + if Debug { + intopt.Env.Logf = log.Printf + } + return intimp.Process(filename, src, intopt) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/barbendor/a/b") return "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 00000000..ade5d1e7 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,80 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. +package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that occurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. + // The length needs to be large enough to cope with the majority of events + // but no so large as to cause undue stack pressure. 
+ // A log message with two values will use 3 labels (one for each value and + // one for the message itself). + + static [3]label.Label // inline storage for the first few labels + dynamic []label.Label // dynamically sized storage for remaining labels +} + +func (ev Event) At() time.Time { return ev.at } + +func (ev Event) Format(f fmt.State, r rune) { + if !ev.at.IsZero() { + fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) + } + for index := 0; ev.Valid(index); index++ { + if l := ev.Label(index); l.Valid() { + fmt.Fprintf(f, "\n\t%v", l) + } + } +} + +func (ev Event) Valid(index int) bool { + return index >= 0 && index < len(ev.static)+len(ev.dynamic) +} + +func (ev Event) Label(index int) label.Label { + if index < len(ev.static) { + return ev.static[index] + } + return ev.dynamic[index-len(ev.static)] +} + +func (ev Event) Find(key label.Key) label.Label { + for _, l := range ev.static { + if l.Key() == key { + return l + } + } + for _, l := range ev.dynamic { + if l.Key() == key { + return l + } + } + return label.Label{} +} + +func MakeEvent(static [3]label.Label, labels []label.Label) Event { + return Event{ + static: static, + dynamic: labels, + } +} + +// CloneEvent event returns a copy of the event with the time adjusted to at. +func CloneEvent(ev Event, at time.Time) Event { + ev.at = at + return ev +} diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go new file mode 100644 index 00000000..05f3a9a5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -0,0 +1,70 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "sync/atomic" + "time" + "unsafe" + + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. 
+type Exporter func(context.Context, Event, label.Map) context.Context + +var ( + exporter unsafe.Pointer +) + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + p := unsafe.Pointer(&e) + if e == nil { + // &e is always valid, and so p is always valid, but for the early abort + // of ProcessEvent to be efficient it needs to make the nil check on the + // pointer without having to dereference it, so we make the nil function + // also a nil pointer + p = nil + } + atomic.StorePointer(&exporter, p) +} + +// deliver is called to deliver an event to the supplied exporter. +// it will fill in the time. +func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { + // add the current time to the event + ev.at = time.Now() + // hand the event off to the current exporter + return exporter(ctx, ev, ev) +} + +// Export is called to deliver an event to the global exporter if set. +func Export(ctx context.Context, ev Event) context.Context { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx + } + return deliver(ctx, *exporterPtr, ev) +} + +// ExportPair is called to deliver a start event to the supplied exporter. +// It also returns a function that will deliver the end event to the same +// exporter. +// It will fill in the time. 
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx, func() {} + } + ctx = deliver(ctx, *exporterPtr, begin) + return ctx, func() { deliver(ctx, *exporterPtr, end) } +} diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go new file mode 100644 index 00000000..06c1d461 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/fast.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Log1 takes a message and one label delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log1(ctx context.Context, message string, t1 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + }, nil)) +} + +// Log2 takes a message and two labels and delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + t2, + }, nil)) +} + +// Metric1 sends a label event to the exporter with the supplied labels. +func Metric1(ctx context.Context, t1 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + }, nil)) +} + +// Metric2 sends a label event to the exporter with the supplied labels. 
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + t2, + }, nil)) +} + +// Start1 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// Start2 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 00000000..5dc6e6ba --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 00000000..4d55e577 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. 
+func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. 
+// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 00000000..4cfa51b6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) any { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) any { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. 
+func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. 
+func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. 
+func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. 
+func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. 
+func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. 
+func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. +func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float32) Of(v float32) label.Label { + return label.Of64(k, uint64(math.Float32bits(v))) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float32) Get(lm label.Map) float32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float32) From(t label.Label) float32 { + return math.Float32frombits(uint32(t.Unpack64())) +} + +// Float64 represents a key +type Float64 struct { + name string + description string +} + +// NewFloat64 creates a new Key for int64 values. 
+func NewFloat64(name, description string) *Float64 { + return &Float64{name: name, description: description} +} + +func (k *Float64) Name() string { return k.name } +func (k *Float64) Description() string { return k.description } + +func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float64) Of(v float64) label.Label { + return label.Of64(k, math.Float64bits(v)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float64) Get(lm label.Map) float64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float64) From(t label.Label) float64 { + return math.Float64frombits(t.Unpack64()) +} + +// String represents a key +type String struct { + name string + description string +} + +// NewString creates a new Key for int64 values. +func NewString(name, description string) *String { + return &String{name: name, description: description} +} + +func (k *String) Name() string { return k.name } +func (k *String) Description() string { return k.description } + +func (k *String) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendQuote(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *String) Of(v string) label.Label { return label.OfString(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *String) Get(lm label.Map) string { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return "" +} + +// From can be used to get a value from a Label. +func (k *String) From(t label.Label) string { return t.UnpackString() } + +// Boolean represents a key +type Boolean struct { + name string + description string +} + +// NewBoolean creates a new Key for bool values. 
+func NewBoolean(name, description string) *Boolean { + return &Boolean{name: name, description: description} +} + +func (k *Boolean) Name() string { return k.name } +func (k *Boolean) Description() string { return k.description } + +func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendBool(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Boolean) Of(v bool) label.Label { + if v { + return label.Of64(k, 1) + } + return label.Of64(k, 0) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Boolean) Get(lm label.Map) bool { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return false +} + +// From can be used to get a value from a Label. +func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 } + +// Error represents a key +type Error struct { + name string + description string +} + +// NewError creates a new Key for int64 values. +func NewError(name, description string) *Error { + return &Error{name: name, description: description} +} + +func (k *Error) Name() string { return k.name } +func (k *Error) Description() string { return k.description } + +func (k *Error) Format(w io.Writer, buf []byte, l label.Label) { + io.WriteString(w, k.From(l).Error()) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Error) Get(lm label.Map) error { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. 
+func (k *Error) From(t label.Label) error { + err, _ := t.UnpackValue().(error) + return err +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go new file mode 100644 index 00000000..7e958665 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +var ( + // Msg is a key used to add message strings to label lists. + Msg = NewString("message", "a readable message") + // Label is a key used to indicate an event adds labels to the context. + Label = NewTag("label", "a label context marker") + // Start is used for things like traces that have a name. + Start = NewString("start", "span start") + // Metric is a key used to indicate an event records metrics. + End = NewTag("end", "a span end marker") + // Metric is a key used to indicate an event records metrics. + Detach = NewTag("detach", "a span detach marker") + // Err is a key used to add error values to label lists. + Err = NewError("error", "an error that occurred") + // Metric is a key used to indicate an event records metrics. + Metric = NewTag("metric", "a metric event marker") +) diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 00000000..c0e8e731 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. 
+func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go new file mode 100644 index 00000000..92a39105 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -0,0 +1,214 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package label + +import ( + "fmt" + "io" + "reflect" + "slices" + "unsafe" +) + +// Key is used as the identity of a Label. +// Keys are intended to be compared by pointer only, the name should be unique +// for communicating with external systems, but it is not required or enforced. +type Key interface { + // Name returns the key name. + Name() string + // Description returns a string that can be used to describe the value. + Description() string + + // Format is used in formatting to append the value of the label to the + // supplied buffer. + // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped any +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. 
+ Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() any { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +type stringptr unsafe.Pointer + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. 
+func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: stringptr(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(stringptr)) + hdr.Len = int(t.packed) + return v +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. +func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + if slices.Contains(f.keys, l.Key()) { + return Label{} + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if 
len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 00000000..58721202 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,567 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// A Runner will run go command invocations and serialize +// them if it sees a concurrency error. +type Runner struct { + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. 
+ serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) +} + +// 1.13: go: updates to go.mod needed, but contents have changed +// 1.14: go: updating go.mod: existing contents have changed since last read +var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) + +// event keys for go command invocations +var ( + verb = keys.NewString("verb", "go command verb") + directory = keys.NewString("directory", "") +) + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)} +} + +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. +func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) + return stdout, friendly +} + +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +// Postcondition: both error results have same nilness. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. 
+ stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + } + + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for range maxInFlight { + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. 
+type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. + ModFile string + + // Overlay is the name of the JSON overlay file that describes + // unsaved editor buffers; see [WriteOverlays]. + // If set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...any) +} + +// Postcondition: both error results have same nilness. +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) 
+ } +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) + case "mod": + // mod needs the sub-verb before flags. + goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second + + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd has a special feature where if the cwd and the PWD + // are the same node then it trusts the PWD, so by setting it + // in the env for the child process we fix up all the paths + // returned by the go command. 
+ if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) + if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() + + return runCmdContext(ctx, cmd) +} + +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't wait to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. 
+ var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that it has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that it still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + startTime := time.Now() + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { + return err + } + + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. 
+ debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + // HandleHangingGoCommand terminates this process. + // Pass off resChan in case we can collect the command error. + handleHangingGoCommand(startTime, cmd, resChan) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + } + + // Cancelled. Interrupt and see if it ends voluntarily. + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } + } + + // Didn't shut down in response to interrupt. Kill it hard. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { + log.Printf("error killing the Go command: %v", err) + } + + return <-resChan +} + +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd", "openbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. 
+ + See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + log.Printf("Handling hanging Go command: running ps: %v", err) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. + if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) + } + } + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", 
env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} + +// WriteOverlays writes each value in the overlay (see the Overlay +// field of go/packages.Config) to a temporary file and returns the name +// of a JSON file describing the mapping that is suitable for the "go +// list -overlay" flag. +// +// On success, the caller must call the cleanup function exactly once +// when the files are no longer needed. +func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(overlay) == 0 { + return "", func() {}, nil + } + + dir, err := os.MkdirTemp("", "gocommand-*") + if err != nil { + return "", nil, err + } + + // The caller must clean up this directory, + // unless this function returns an error. + // (The cleanup operand of each return + // statement below is ignored.) + defer func() { + cleanup = func() { + os.RemoveAll(dir) + } + if err != nil { + cleanup() + cleanup = nil + } + }() + + // Write each map entry to a temporary file. + overlays := make(map[string]string) + for k, v := range overlay { + // Use a unique basename for each file (001-foo.go), + // to avoid creating nested directories. + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) + filename := filepath.Join(dir, base) + err := os.WriteFile(filename, v, 0666) + if err != nil { + return "", nil, err + } + overlays[k] = filename + } + + // Write the JSON overlay file that maps logical file names to temp files. + // + // OverlayJSON is the format overlay files are expected to be in. + // The Replace map maps from overlaid paths to replacement paths: + // the Go command will forward all reads trying to open + // each overlaid path to its replacement path, or consider the overlaid + // path not to exist if the replacement path is empty. + // + // From golang/go#39958. 
+ type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", nil, err + } + filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0666); err != nil { + return "", nil, err + } + + return filename, nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go new file mode 100644 index 00000000..469c648e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package gocommand + +import "os" + +// sigStuckProcess is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var sigStuckProcess = os.Kill diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go new file mode 100644 index 00000000..169d37c8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package gocommand + +import "syscall" + +// Sigstuckprocess is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var sigStuckProcess = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 00000000..e38d1fb4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,163 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. 
+ if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. 
+// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. +func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 00000000..446c5846 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.BuildFlags = nil // This is not a build command. + inv.ModFlag = "" + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. 
+func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 00000000..5252144d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,336 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "slices" + "strings" + "sync" + "time" +) + +// Options controls the behavior of a Walk call. +type Options struct { + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...any) + + // Search module caches. Also disables legacy goimports ignore rules. + ModulesEnabled bool + + // Maximum number of concurrent calls to user-provided callbacks, + // or 0 for GOMAXPROCS. + Concurrency int +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// +// For each package found, add will be called with the absolute +// paths of the containing source directory and the package directory. +// +// Unlike filepath.WalkDir, Walk follows symbolic links +// (while guarding against cycles). 
+func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to +// find packages. +// +// For each package found, add will be called with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// +// Unlike filepath.WalkDir, WalkSkip follows symbolic links +// (while guarding against cycles). +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +// walkDir creates a walker and starts fastwalk with this walker. +func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if opts.Logf == nil { + opts.Logf = func(format string, args ...any) {} + } + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + opts.Logf("skipping nonexistent directory: %v", root.Path) + return + } + start := time.Now() + opts.Logf("scanning %s", root.Path) + + concurrency := opts.Concurrency + if concurrency == 0 { + // The walk be either CPU-bound or I/O-bound, depending on what the + // caller-supplied add function does and the details of the user's platform + // and machine. Rather than trying to fine-tune the concurrency level for a + // specific environment, we default to GOMAXPROCS: it is likely to be a good + // choice for a CPU-bound add function, and if it is instead I/O-bound, then + // dealing with I/O saturation is arguably the job of the kernel and/or + // runtime. (Oversaturating I/O seems unlikely to harm performance as badly + // as failing to saturate would.) 
+ concurrency = runtime.GOMAXPROCS(0) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + sem: make(chan struct{}, concurrency), + } + w.init() + + w.sem <- struct{}{} + path := root.Path + if path == "" { + path = "." + } + if fi, err := os.Lstat(path); err == nil { + w.walk(path, nil, fs.FileInfoToDirEntry(fi)) + } else { + w.opts.Logf("scanning directory %v: %v", root.Path, err) + } + <-w.sem + w.walking.Wait() + + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. + opts Options // Options passed to Walk by the user. + + walking sync.WaitGroup + sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release. + ignoredDirs []string + + added sync.Map // map[string]bool +} + +// A symlinkList is a linked list of os.FileInfos for parent directories +// reached via symlinks. +type symlinkList struct { + info os.FileInfo + prev *symlinkList +} + +// init initializes the walker based on its Options +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + w.ignoredDirs = append(w.ignoredDirs, full) + w.opts.Logf("Directory added to ignore list: %s", full) + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. 
+func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := os.ReadFile(file) + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +// shouldSkipDir reports whether the file should be skipped or not. +func (w *walker) shouldSkipDir(dir string) bool { + if slices.Contains(w.ignoredDirs, dir) { + return true + } + if w.skip != nil { + // Check with the user specified callback. + return w.skip(w.root, dir) + } + return false +} + +// walk walks through the given path. +// +// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored. +func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) { + if d.Type()&os.ModeSymlink != 0 { + // Walk the symlink's target rather than the symlink itself. + // + // (Note that os.Stat, unlike the lower-lever os.Readlink, + // follows arbitrarily many layers of symlinks, so it will eventually + // reach either a non-symlink or a nonexistent target.) + // + // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src + // and GOPATH/src. Do we really need to traverse them here? If so, why? + + fi, err := os.Stat(path) + if err != nil { + w.opts.Logf("%v", err) + return + } + + // Avoid walking symlink cycles: if we have already followed a symlink to + // this directory as a parent of itself, don't follow it again. + // + // This doesn't catch the first time through a cycle, but it also minimizes + // the number of extra stat calls we make if we *don't* encounter a cycle. + // Since we don't actually expect to encounter symlink cycles in practice, + // this seems like the right tradeoff. 
+ for parent := pathSymlinks; parent != nil; parent = parent.prev { + if os.SameFile(fi, parent.info) { + return + } + } + + pathSymlinks = &symlinkList{ + info: fi, + prev: pathSymlinks, + } + d = fs.FileInfoToDirEntry(fi) + } + + if d.Type().IsRegular() { + if !strings.HasSuffix(path, ".go") { + return + } + + dir := filepath.Dir(path) + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + // + // TODO(bcmills): there are many levels of directory within + // RootModuleCache where this also wouldn't make sense, + // Can we generalize this to any directory without a corresponding + // import path? + return + } + + if _, dup := w.added.LoadOrStore(dir, true); !dup { + w.add(w.root, dir) + } + } + + if !d.IsDir() { + return + } + + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") || + w.shouldSkipDir(path) { + return + } + + // Read the directory and walk its entries. + + f, err := os.Open(path) + if err != nil { + w.opts.Logf("%v", err) + return + } + defer f.Close() + + for { + // We impose an arbitrary limit on the number of ReadDir results per + // directory to limit the amount of memory consumed for stale or upcoming + // directory entries. The limit trades off CPU (number of syscalls to read + // the whole directory) against RAM (reachable directory entries other than + // the one currently being processed). + // + // Since we process the directories recursively, we will end up maintaining + // a slice of entries for each level of the directory tree. + // (Compare https://go.dev/issue/36197.) 
+ ents, err := f.ReadDir(1024) + if err != nil { + if err != io.EOF { + w.opts.Logf("%v", err) + } + break + } + + for _, d := range ents { + nextPath := filepath.Join(path, d.Name()) + if d.IsDir() { + select { + case w.sem <- struct{}{}: + // Got a new semaphore token, so we can traverse the directory concurrently. + d := d + w.walking.Add(1) + go func() { + defer func() { + <-w.sem + w.walking.Done() + }() + w.walk(nextPath, pathSymlinks, d) + }() + continue + + default: + // No tokens available, so traverse serially. + } + } + + w.walk(nextPath, pathSymlinks, d) + } + } +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go new file mode 100644 index 00000000..1b4dc0cb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -0,0 +1,1896 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "io/fs" + "io/ioutil" + "maps" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" +) + +// importToGroup is a list of functions which map from an import path to +// a group number. 
+var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { + return + } + for p := range strings.SplitSeq(localPrefix, ",") { + if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { + return 3, true + } + } + return + }, + func(_, importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(_, importPath string) (num int, ok bool) { + firstComponent := strings.Split(importPath, "/")[0] + if strings.Contains(firstComponent, ".") { + return 1, true + } + return + }, +} + +func importGroup(localPrefix, importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(localPrefix, importPath); ok { + return n + } + } + return 0 +} + +type ImportFixType int + +const ( + AddImport ImportFixType = iota + DeleteImport + SetImportName +) + +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType + Relevance float64 // see pkg +} + +// parseOtherFiles parses all the Go files in srcDir except filename, including +// test files if filename looks like a test. +// +// It returns an error only if ctx is cancelled. Files with parse errors are +// ignored. +func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename string) ([]*ast.File, error) { + // This could use go/packages but it doesn't buy much, and it fails + // with https://golang.org/issue/26296 in LoadFiles mode in some cases. 
+ considerTests := strings.HasSuffix(filename, "_test.go") + + fileBase := filepath.Base(filename) + packageFileInfos, err := os.ReadDir(srcDir) + if err != nil { + return nil, ctx.Err() + } + + var files []*ast.File + for _, fi := range packageFileInfos { + if ctx.Err() != nil { + return nil, ctx.Err() + } + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution) + if err != nil { + continue + } + + files = append(files, f) + } + + return files, ctx.Err() +} + +// addGlobals puts the names of package vars into the provided map. +func addGlobals(f *ast.File, globals map[string]bool) { + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + globals[valueSpec.Names[0].Name] = true + } + } +} + +// collectReferences builds a map of selector expressions, from +// left hand side (X) to a set of right hand sides (Sel). +func collectReferences(f *ast.File) References { + refs := References{} + + var visitor visitFn + visitor = func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // If the parser can resolve it, it's not a package ref. + break + } + if !ast.IsExported(v.Sel.Name) { + // Whatever this is, it's not exported from a package. + break + } + pkgName := xident.Name + r := refs[pkgName] + if r == nil { + r = make(map[string]bool) + refs[pkgName] = r + } + r[v.Sel.Name] = true + } + return visitor + } + ast.Walk(visitor, f) + return refs +} + +// collectImports returns all the imports in f. +// Unnamed imports (., _) and "C" are ignored. 
+func collectImports(f *ast.File) []*ImportInfo { + var imports []*ImportInfo + for _, imp := range f.Imports { + var name string + if imp.Name != nil { + name = imp.Name.Name + } + if imp.Path.Value == `"C"` || name == "_" || name == "." { + continue + } + path := strings.Trim(imp.Path.Value, `"`) + imports = append(imports, &ImportInfo{ + Name: name, + ImportPath: path, + }) + } + return imports +} + +// findMissingImport searches pass's candidates for an import that provides +// pkg, containing all of syms. +func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { + for _, candidate := range p.candidates { + pkgInfo, ok := p.knownPackages[candidate.ImportPath] + if !ok { + continue + } + if p.importIdentifier(candidate) != pkg { + continue + } + + allFound := true + for right := range syms { + if !pkgInfo.Exports[right] { + allFound = false + break + } + } + + if allFound { + return candidate + } + } + return nil +} + +// A pass contains all the inputs and state necessary to fix a file's imports. +// It can be modified in some ways during use; see comments below. +type pass struct { + // Inputs. These must be set before a call to load, and not modified after. + fset *token.FileSet // fset used to parse f and its siblings. + f *ast.File // the file being fixed. + srcDir string // the directory containing f. + logf func(string, ...any) + source Source // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + goroot string + + // Intermediate state, generated by load. + existingImports map[string][]*ImportInfo + allRefs References + missingRefs References + + // Inputs to fix. These can be augmented between successive fix calls. + lastTry bool // indicates that this is the last call and fix should clean up as best it can. + candidates []*ImportInfo // candidate imports in priority order. 
+ knownPackages map[string]*PackageInfo // information about all known packages. +} + +// loadPackageNames saves the package names for everything referenced by imports. +func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error { + if p.logf != nil { + p.logf("loading package names for %v packages", len(imports)) + defer func() { + p.logf("done loading package names for %v packages", len(imports)) + }() + } + var unknown []string + for _, imp := range imports { + if _, ok := p.knownPackages[imp.ImportPath]; ok { + continue + } + unknown = append(unknown, imp.ImportPath) + } + + names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown) + if err != nil { + return err + } + + // TODO(rfindley): revisit this. Why do we need to store known packages with + // no exports? The inconsistent data is confusing. + for path, name := range names { + p.knownPackages[path] = &PackageInfo{ + Name: name, + Exports: map[string]bool{}, + } + } + return nil +} + +// WithoutVersion removes a trailing major version, if there is one. +func WithoutVersion(nm string) string { + if v := path.Base(nm); len(v) > 0 && v[0] == 'v' { + if _, err := strconv.Atoi(v[1:]); err == nil { + // this is, for instance, called with rand/v2 and returns rand + if len(v) < len(nm) { + xnm := nm[:len(nm)-len(v)-1] + return path.Base(xnm) + } + } + } + return nm +} + +// importIdentifier returns the identifier that imp will introduce. It will +// guess if the package name has not been loaded, e.g. because the source +// is not available. +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name + } + known := p.knownPackages[imp.ImportPath] + if known != nil && known.Name != "" { + return WithoutVersion(known.Name) + } + return ImportPathToAssumedName(imp.ImportPath) +} + +// load reads in everything necessary to run a pass, and reports whether the +// file already has all the imports it needs. 
It fills in p.missingRefs with the +// file's missing symbols, if any, or removes unused imports if not. +func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { + p.knownPackages = map[string]*PackageInfo{} + p.missingRefs = References{} + p.existingImports = map[string][]*ImportInfo{} + + // Load basic information about the file in question. + p.allRefs = collectReferences(p.f) + + // Load stuff from other files in the same package: + // global variables so we know they don't need resolving, and imports + // that we might want to mimic. + globals := map[string]bool{} + for _, otherFile := range p.otherFiles { + // Don't load globals from files that are in the same directory + // but a different package. Using them to suggest imports is OK. + if p.f.Name.Name == otherFile.Name.Name { + addGlobals(otherFile, globals) + } + p.candidates = append(p.candidates, collectImports(otherFile)...) + } + + // Resolve all the import paths we've seen to package names, and store + // f's imports by the identifier they introduce. + imports := collectImports(p.f) + if p.loadRealPackageNames { + err := p.loadPackageNames(ctx, append(imports, p.candidates...)) + if err != nil { + if p.logf != nil { + p.logf("loading package names: %v", err) + } + return nil, false + } + } + for _, imp := range imports { + p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp) + } + + // Find missing references. + for left, rights := range p.allRefs { + if globals[left] { + continue + } + _, ok := p.existingImports[left] + if !ok { + p.missingRefs[left] = rights + continue + } + } + if len(p.missingRefs) != 0 { + return nil, false + } + + return p.fix() +} + +// fix attempts to satisfy missing imports using p.candidates. If it finds +// everything, or if p.lastTry is true, it updates fixes to add the imports it found, +// delete anything unused, and update import names, and returns true. 
+func (p *pass) fix() ([]*ImportFix, bool) { + // Find missing imports. + var selected []*ImportInfo + for left, rights := range p.missingRefs { + if imp := p.findMissingImport(left, rights); imp != nil { + selected = append(selected, imp) + } + } + + if !p.lastTry && len(selected) != len(p.missingRefs) { + return nil, false + } + + // Found everything, or giving up. Add the new imports and remove any unused. + var fixes []*ImportFix + for _, identifierImports := range p.existingImports { + for _, imp := range identifierImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } + + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } + } + } + // Collecting fixes involved map iteration, so sort for stability. See + // golang/go#59976. + sortFixes(fixes) + + // collect selected fixes in a separate slice, so that it can be sorted + // separately. Note that these fixes must occur after fixes to existing + // imports. TODO(rfindley): figure out why. 
+ var selectedFixes []*ImportFix + for _, imp := range selected { + selectedFixes = append(selectedFixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: AddImport, + }) + } + sortFixes(selectedFixes) + + return append(fixes, selectedFixes...), true +} + +func sortFixes(fixes []*ImportFix) { + sort.Slice(fixes, func(i, j int) bool { + fi, fj := fixes[i], fixes[j] + if fi.StmtInfo.ImportPath != fj.StmtInfo.ImportPath { + return fi.StmtInfo.ImportPath < fj.StmtInfo.ImportPath + } + if fi.StmtInfo.Name != fj.StmtInfo.Name { + return fi.StmtInfo.Name < fj.StmtInfo.Name + } + if fi.IdentName != fj.IdentName { + return fi.IdentName < fj.IdentName + } + return fi.FixType < fj.FixType + }) +} + +// importSpecName gets the import name of imp in the import spec. +// +// When the import identifier matches the assumed import name, the import name does +// not appear in the import spec. +func (p *pass) importSpecName(imp *ImportInfo) string { + // If we did not load the real package names, or the name is already set, + // we just return the existing name. + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name + } + + ident := p.importIdentifier(imp) + if ident == ImportPathToAssumedName(imp.ImportPath) { + return "" // ident not needed since the assumed and real names are the same. + } + return ident +} + +// apply will perform the fixes on f in order. +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { + for _, fix := range fixes { + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: + // Find the matching import path and change the name. 
+ for _, spec := range f.Imports { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { + spec.Name = &ast.Ident{ + Name: fix.StmtInfo.Name, + NamePos: spec.Pos(), + } + } + } + } + } +} + +// assumeSiblingImportsValid assumes that siblings' use of packages is valid, +// adding the exports they use. +func (p *pass) assumeSiblingImportsValid() { + for _, f := range p.otherFiles { + refs := collectReferences(f) + imports := collectImports(f) + importsByName := map[string]*ImportInfo{} + for _, imp := range imports { + importsByName[p.importIdentifier(imp)] = imp + } + for left, rights := range refs { + if imp, ok := importsByName[left]; ok { + if m, ok := stdlib.PackageSymbols[imp.ImportPath]; ok { + // We have the stdlib in memory; no need to guess. + rights = symbolNameSet(m) + } + // TODO(rfindley): we should set package name here, for consistency. + p.addCandidate(imp, &PackageInfo{ + // no name; we already know it. + Exports: rights, + }) + } + } + } +} + +// addCandidate adds a candidate import to p, and merges in the information +// in pkg. +func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { + p.candidates = append(p.candidates, imp) + if existing, ok := p.knownPackages[imp.ImportPath]; ok { + if existing.Name == "" { + existing.Name = pkg.Name + } + for export := range pkg.Exports { + existing.Exports[export] = true + } + } else { + p.knownPackages[imp.ImportPath] = pkg + } +} + +// fixImports adds and removes imports from f so that all its references are +// satisfied and there are no unused imports. +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. +// +// DO NOT REMOVE: used internally at Google. 
+var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { + fixes, err := getFixes(context.Background(), fset, f, filename, env) + if err != nil { + return err + } + apply(fset, f, fixes) + return nil +} + +// getFixes gets the import fixes that need to be made to f in order to fix the imports. +// It does not modify the ast. +func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + source, err := NewProcessEnvSource(env, filename, f.Name.Name) + if err != nil { + return nil, err + } + goEnv, err := env.goEnv() + if err != nil { + return nil, err + } + return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source) +} + +func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) { + // This logic is defensively duplicated from getFixes. + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + + if logf != nil { + logf("fixImports(filename=%q), srcDir=%q ...", filename, srcDir) + } + + // First pass: looking only at f, and using the naive algorithm to + // derive package names from import paths, see if the file is already + // complete. We can't add any imports yet, because we don't know + // if missing references are actually package vars. + p := &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: source, + } + if fixes, done := p.load(ctx); done { + return fixes, nil + } + + otherFiles, err := parseOtherFiles(ctx, fset, srcDir, filename) + if err != nil { + return nil, err + } + + // Second pass: add information from other files in the same package, + // like their package vars and imports. 
+ p.otherFiles = otherFiles + if fixes, done := p.load(ctx); done { + return fixes, nil + } + + // Now we can try adding imports from the stdlib. + p.assumeSiblingImportsValid() + addStdlibCandidates(p, p.missingRefs) + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Third pass: get real package names where we had previously used + // the naive algorithm. + p = &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: p.source, // safe to reuse, as it's just a wrapper around env + } + p.loadRealPackageNames = true + p.otherFiles = otherFiles + if fixes, done := p.load(ctx); done { + return fixes, nil + } + + if err := addStdlibCandidates(p, p.missingRefs); err != nil { + return nil, err + } + p.assumeSiblingImportsValid() + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Go look for candidates in $GOPATH, etc. We don't necessarily load + // the real exports of sibling imports, so keep assuming their contents. + if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil { + return nil, err + } + + p.lastTry = true + fixes, _ := p.fix() + return fixes, nil +} + +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 + +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. +func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } + goenv, err := env.goEnv() + if err != nil { + return err + } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + + // Start off with the standard library. 
+ for importPath, symbols := range stdlib.PackageSymbols { + p := &pkg{ + dir: filepath.Join(goenv["GOROOT"], "src", importPath), + importPathShort: importPath, + packageName: path.Base(importPath), + relevance: MaxRelevance, + } + dupCheck[importPath] = struct{}{} + if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { + var exports []stdlib.Symbol + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Func, stdlib.Type, stdlib.Var, stdlib.Const: + exports = append(exports, sym) + } + } + wrappedCallback.exportsLoaded(p, exports) + } + } + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { + // If we're an x_test, load the package under test's test variant. 
+ if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, + } + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) +} + +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } + for _, path := range paths { + result[path] = resolver.scoreImportPath(ctx, path) + } + return result, nil +} + +func PrimeCache(ctx context.Context, resolver Resolver) error { + // Fully scan the disk for directories, but don't actually read any Go files. + callback := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // See getCandidatePkgs: walking GOROOT is apparently expensive and + // unnecessary. + return root.Type != gopathwalk.RootGOROOT + }, + dirFound: func(pkg *pkg) bool { + return false + }, + // packageNameLoaded and exportsLoaded must never be called. + } + + return resolver.scan(ctx, callback) +} + +func candidateImportName(pkg *pkg) string { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { + return pkg.packageName + } + return "" +} + +// GetAllCandidates calls wrapped for each package whose name starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +// +// Beware that the wrapped function may be called multiple times concurrently. +// TODO(adonovan): encapsulate the concurrency. 
+func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !CanUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. + return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// GetImportPaths calls wrapped for each package whose import path starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !CanUse(filename, pkg.dir) { + return false + } + return strings.HasPrefix(pkg.importPathShort, searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// A PackageExport is a package and its exports. 
+type PackageExport struct { + Fix *ImportFix + Exports []stdlib.Symbol +} + +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, References{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { + sortSymbols(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate +// imports when doing cross-platform development. +var requiredGoEnvVars = []string{ + "GO111MODULE", + "GOFLAGS", + "GOINSECURE", + "GOMOD", + "GOMODCACHE", + "GONOPROXY", + "GONOSUMDB", + "GOPATH", + "GOPROXY", + "GOROOT", + "GOSUMDB", + "GOWORK", +} + +// ProcessEnv contains environment variables and settings that affect the use of +// the go command, the go/build package, etc. +// +// ...a ProcessEnv *also* overwrites its Env along with derived state in the +// form of the resolver. And because it is lazily initialized, an env may just +// be broken and unusable, but there is no way for the caller to detect that: +// all queries will just fail. +// +// TODO(rfindley): refactor this package so that this type (perhaps renamed to +// just Env or Config) is an immutable configuration struct, to be exchanged +// for an initialized object via a constructor that returns an error. 
Perhaps +// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where +// resolver is a concrete type used for resolving imports. Via this +// refactoring, we can avoid the need to call ProcessEnv.init and +// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where this +// these are misused. Also, we'd delegate the caller the decision of how to +// handle a broken environment. +type ProcessEnv struct { + GocmdRunner *gocommand.Runner + + BuildFlags []string + ModFlag string + + // SkipPathInScan returns true if the path should be skipped from scans of + // the RootCurrentModule root type. The function argument is a clean, + // absolute path. + SkipPathInScan func(string) bool + + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of requiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string + + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...any) + + // If set, ModCache holds a shared cache of directory info to use across + // multiple ProcessEnvs. + ModCache *DirInfoCache + + initialized bool // see TODO above + + // resolver and resolverErr are lazily evaluated (see GetResolver). + // This is unclean, but see the big TODO in the docstring for ProcessEnv + // above: for now, we can't be sure that the ProcessEnv is fully initialized. + resolver Resolver + resolverErr error +} + +func (e *ProcessEnv) goEnv() (map[string]string, error) { + if err := e.init(); err != nil { + return nil, err + } + return e.Env, nil +} + +func (e *ProcessEnv) matchFile(dir, name string) (bool, error) { + bctx, err := e.buildContext() + if err != nil { + return false, err + } + return bctx.MatchFile(dir, name) +} + +// CopyConfig copies the env's configuration into a new env. 
+func (e *ProcessEnv) CopyConfig() *ProcessEnv { + copy := &ProcessEnv{ + GocmdRunner: e.GocmdRunner, + initialized: e.initialized, + BuildFlags: e.BuildFlags, + Logf: e.Logf, + WorkingDir: e.WorkingDir, + resolver: nil, + Env: map[string]string{}, + } + maps.Copy(copy.Env, e.Env) + return copy +} + +func (e *ProcessEnv) init() error { + if e.initialized { + return nil + } + + foundAllRequired := true + for _, k := range requiredGoEnvVars { + if _, ok := e.Env[k]; !ok { + foundAllRequired = false + break + } + } + if foundAllRequired { + e.initialized = true + return nil + } + + if e.Env == nil { + e.Env = map[string]string{} + } + + goEnv := map[string]string{} + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, requiredGoEnvVars...)...) + if err != nil { + return err + } + if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { + return err + } + maps.Copy(e.Env, goEnv) + e.initialized = true + return nil +} + +func (e *ProcessEnv) env() []string { + var env []string // the gocommand package will prepend os.Environ. + for k, v := range e.Env { + env = append(env, k+"="+v) + } + return env +} + +func (e *ProcessEnv) GetResolver() (Resolver, error) { + if err := e.init(); err != nil { + return nil, err + } + + if e.resolver == nil && e.resolverErr == nil { + // TODO(rfindley): we should only use a gopathResolver here if the working + // directory is actually *in* GOPATH. (I seem to recall an open gopls issue + // for this behavior, but I can't find it). + // + // For gopls, we can optionally explicitly choose a resolver type, since we + // already know the view type. 
+ if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") { + e.resolver = newGopathResolver(e) + e.logf("created gopath resolver") + } else if r, err := newModuleResolver(e, e.ModCache); err != nil { + e.resolverErr = err + e.logf("failed to create module resolver: %v", err) + } else { + e.resolver = Resolver(r) + e.logf("created module resolver") + } + } + + return e.resolver, e.resolverErr +} + +// logf logs if e.Logf is non-nil. +func (e *ProcessEnv) logf(format string, args ...any) { + if e.Logf != nil { + e.Logf(format, args...) + } +} + +// buildContext returns the build.Context to use for matching files. +// +// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform +// development. +func (e *ProcessEnv) buildContext() (*build.Context, error) { + ctx := build.Default + goenv, err := e.goEnv() + if err != nil { + return nil, err + } + ctx.GOROOT = goenv["GOROOT"] + ctx.GOPATH = goenv["GOPATH"] + + // As of Go 1.14, build.Context has a Dir field + // (see golang.org/issue/34860). + // Populate it only if present. + rc := reflect.ValueOf(&ctx).Elem() + dir := rc.FieldByName("Dir") + if dir.IsValid() && dir.Kind() == reflect.String { + dir.SetString(e.WorkingDir) + } + + // Since Go 1.11, go/build.Context.Import may invoke 'go list' depending on + // the value in GO111MODULE in the process's environment. We always want to + // run in GOPATH mode when calling Import, so we need to prevent this from + // happening. In Go 1.16, GO111MODULE defaults to "on", so this problem comes + // up more frequently. + // + // HACK: setting any of the Context I/O hooks prevents Import from invoking + // 'go list', regardless of GO111MODULE. This is undocumented, but it's + // unlikely to change before GOPATH support is removed. 
+ ctx.ReadDir = ioutil.ReadDir + + return &ctx, nil +} + +func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { + inv := gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: e.BuildFlags, + Env: e.env(), + Logf: e.Logf, + WorkingDir: e.WorkingDir, + } + return e.GocmdRunner.Run(ctx, inv) +} + +func addStdlibCandidates(pass *pass, refs References) error { + localbase := func(nm string) string { + ans := path.Base(nm) + if ans[0] == 'v' { + // this is called, for instance, with math/rand/v2 and returns rand/v2 + if _, err := strconv.Atoi(ans[1:]); err == nil { + ix := strings.LastIndex(nm, ans) + more := path.Base(nm[:ix]) + ans = path.Join(more, ans) + } + } + return ans + } + add := func(pkg string) { + // Prevent self-imports. + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir { + return + } + exports := symbolNameSet(stdlib.PackageSymbols[pkg]) + pass.addCandidate( + &ImportInfo{ImportPath: pkg}, + &PackageInfo{Name: localbase(pkg), Exports: exports}) + } + for left := range refs { + if left == "rand" { + // Make sure we try crypto/rand before any version of math/rand as both have Int() + // and our policy is to recommend crypto + add("crypto/rand") + // if the user's no later than go1.21, this should be "math/rand" + // but we have no way of figuring out what the user is using + // TODO: investigate using the toolchain version to disambiguate in the stdlib + add("math/rand/v2") + // math/rand has an overlapping API + // TestIssue66407 fails without this + add("math/rand") + continue + } + for importPath := range stdlib.PackageSymbols { + if path.Base(importPath) == left { + add(importPath) + } + } + } + return nil +} + +// A Resolver does the build-system-specific parts of goimports. +type Resolver interface { + // loadPackageNames loads the package names in importPaths. 
+ loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + + // scan works with callback to search for packages. See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error + + // loadExports returns the package name and set of exported symbols in the + // package at dir. loadExports may be called concurrently. + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) + + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) float64 + + // ClearForNewScan returns a new Resolver based on the receiver that has + // cleared its internal caches of directory contents. + // + // The new resolver should be primed and then set via + // [ProcessEnv.UpdateResolver]. + ClearForNewScan() Resolver +} + +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. 
+ exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) +} + +func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + + results, err := pass.source.ResolveReferences(ctx, filename, refs) + if err != nil { + return err + } + + for _, result := range results { + if result == nil { + continue + } + // Don't offer completions that would shadow predeclared + // names, such as github.com/coreos/etcd/error. + if types.Universe.Lookup(result.Package.Name) != nil { // predeclared + // Ideally we would skip this candidate only + // if the predeclared name is actually + // referenced by the file, but that's a lot + // trickier to compute and would still create + // an import that is likely to surprise the + // user before long. + continue + } + pass.addCandidate(result.Import, result.Package) + } + return nil +} + +// notIdentifier reports whether ch is an invalid identifier character. +func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// ImportPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. +func ImportPathToAssumedName(importPath string) string { + base := path.Base(importPath) + if strings.HasPrefix(base, "v") { + if _, err := strconv.Atoi(base[1:]); err == nil { + dir := path.Dir(importPath) + if dir != "." 
{ + base = path.Base(dir) + } + } + } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } + return base +} + +// gopathResolver implements resolver for GOPATH workspaces. +type gopathResolver struct { + env *ProcessEnv + cache *DirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. +} + +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: NewDirInfoCache(), + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *gopathResolver) ClearForNewScan() Resolver { + return newGopathResolver(r.env) +} + +func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + bctx, err := r.env.buildContext() + if err != nil { + return nil, err + } + for _, path := range importPaths { + names[path] = importPathToName(bctx, path, srcDir) + } + return names, nil +} + +// importPathToName finds out the actual package name, as declared in its .go files. +func importPathToName(bctx *build.Context, importPath, srcDir string) string { + // Fast path for standard library without going to disk. + if stdlib.HasPackage(importPath) { + return path.Base(importPath) // stdlib packages always match their paths. + } + + buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "" + } + pkgName, err := packageDirToName(buildPkg.Dir) + if err != nil { + return "" + } + return pkgName +} + +// packageDirToName is a faster version of build.Import if +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, +// trusting that the files in the directory are consistent. 
+func packageDirToName(dir string) (packageName string, err error) { + d, err := os.Open(dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. +} + +type pkgDistance struct { + pkg *pkg + distance int // relative distance to target +} + +// byDistanceOrImportPathShortLength sorts by relative distance breaking ties +// on the short import path length and then the import string itself. 
+type byDistanceOrImportPathShortLength []pkgDistance + +func (s byDistanceOrImportPathShortLength) Len() int { return len(s) } +func (s byDistanceOrImportPathShortLength) Less(i, j int) bool { + di, dj := s[i].distance, s[j].distance + if di == -1 { + return false + } + if dj == -1 { + return true + } + if di != dj { + return di < dj + } + + vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort + if len(vi) != len(vj) { + return len(vi) < len(vj) + } + return vi < vj +} +func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func distance(basepath, targetpath string) int { + p, err := filepath.Rel(basepath, targetpath) + if err != nil { + return -1 + } + if p == "." { + return 0 + } + return strings.Count(p, string(filepath.Separator)) + 1 +} + +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { + add := func(root gopathwalk.Root, dir string) { + // We assume cached directories have not changed. We can skip them and their + // children. + if _, ok := r.cache.Load(dir); ok { + return + } + + importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) + info := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: VendorlessPath(importpath), + } + r.cache.Store(dir, info) + } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + + p := &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + relevance: MaxRelevance - 1, + } + if info.rootType == gopathwalk.RootGOROOT { + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) + } + } + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + var roots []gopathwalk.Root + roots = append(roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "src"), Type: gopathwalk.RootGOROOT}) + for _, p := range filepath.SplitList(goenv["GOPATH"]) { + roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "src"), Type: gopathwalk.RootGOPATH}) + } + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots = filterRoots(roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. 
+ scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { + if stdlib.HasPackage(path) { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { + var result []gopathwalk.Root + for _, root := range roots { + if !include(root) { + continue + } + result = append(result, root) + } + return result +} + +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { + return r.cache.CacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []stdlib.Symbol, error) { + // Look for non-test, buildable .go files which could provide exports. 
+ all, err := os.ReadDir(dir) + if err != nil { + return "", nil, err + } + var files []fs.DirEntry + for _, fi := range all { + name := fi.Name() + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { + continue + } + match, err := env.matchFile(dir, fi.Name()) + if err != nil || !match { + continue + } + files = append(files, fi) + } + + if len(files) == 0 { + return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) + } + + var pkgName string + var exports []stdlib.Symbol + fset := token.NewFileSet() + for _, fi := range files { + select { + case <-ctx.Done(): + return "", nil, ctx.Err() + default: + } + + fullFile := filepath.Join(dir, fi.Name()) + // Legacy ast.Object resolution is needed here. + f, err := parser.ParseFile(fset, fullFile, nil, 0) + if err != nil { + env.logf("error parsing %v: %v", fullFile, err) + continue + } + if f.Name.Name == "documentation" { + // Special case from go/build.ImportDir, not + // handled by MatchFile above. + continue + } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } + pkgName = f.Name.Name + for name, obj := range f.Scope.Objects { + if ast.IsExported(name) { + var kind stdlib.Kind + switch obj.Kind { + case ast.Con: + kind = stdlib.Const + case ast.Typ: + kind = stdlib.Type + case ast.Var: + kind = stdlib.Var + case ast.Fun: + kind = stdlib.Func + } + exports = append(exports, stdlib.Symbol{ + Name: name, + Kind: kind, + Version: 0, // unknown; be permissive + }) + } + } + } + sortSymbols(exports) + + env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) + return pkgName, exports, nil +} + +func sortSymbols(syms []stdlib.Symbol) { + sort.Slice(syms, func(i, j int) bool { + return syms[i].Name < syms[j].Name + }) +} + +// A symbolSearcher searches for a package with a set of symbols, among a set +// of candidates. See [symbolSearcher.search]. 
+// +// The search occurs within the scope of a single file, with context captured +// in srcDir and xtest. +type symbolSearcher struct { + logf func(string, ...any) + srcDir string // directory containing the file + xtest bool // if set, the file containing is an x_test file + loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) +} + +// search searches the provided candidates for a package containing all +// exported symbols. +// +// If successful, returns the resulting package. +func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { + // Sort the candidates by their import package length, + // assuming that shorter package names are better than long + // ones. Note that this sorts by the de-vendored name, so + // there's no "penalty" for vendoring. + sort.Sort(byDistanceOrImportPathShortLength(candidates)) + if s.logf != nil { + for i, c := range candidates { + s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + } + } + + // Arrange rescv so that we can we can await results in order of relevance + // and exit as soon as we find the first match. + // + // Search with bounded concurrency, returning as soon as the first result + // among rescv is non-nil. + rescv := make([]chan *pkg, len(candidates)) + for i := range candidates { + rescv[i] = make(chan *pkg, 1) + } + const maxConcurrentPackageImport = 4 + loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + + // Ensure that all work is completed at exit. + ctx, cancel := context.WithCancel(ctx) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + // Start the search. 
+ wg.Add(1) + go func() { + defer wg.Done() + for i, c := range candidates { + select { + case loadExportsSem <- struct{}{}: + case <-ctx.Done(): + return + } + + i := i + c := c + wg.Add(1) + go func() { + defer func() { + <-loadExportsSem + wg.Done() + }() + if s.logf != nil { + s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + } + pkg, err := s.searchOne(ctx, c, symbols) + if err != nil { + if s.logf != nil && ctx.Err() == nil { + s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + } + pkg = nil + } + rescv[i] <- pkg // may be nil + }() + } + }() + + // Await the first (best) result. + for _, resc := range rescv { + select { + case r := <-resc: + if r != nil { + return r, nil + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + return nil, nil +} + +func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + // If we're considering the package under test from an x_test, load the + // test variant. + includeTest := s.xtest && c.pkg.dir == s.srcDir + _, exports, err := s.loadExports(ctx, c.pkg, includeTest) + if err != nil { + return nil, err + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym.Name] = true + } + for symbol := range symbols { + if !exportsMap[symbol] { + return nil, nil // no match + } + } + return c.pkg, nil +} + +// pkgIsCandidate reports whether pkg is a candidate for satisfying the +// finding which package pkgIdent in the file named by filename is trying +// to refer to. +// +// This check is purely lexical and is meant to be as fast as possible +// because it's run over all $GOPATH directories to filter out poor +// candidates in order to limit the CPU and I/O later parsing the +// exports in candidate packages. +// +// filename is the file being formatted. 
+// pkgIdent is the package being searched for, like "client" (if +// searching for "client.New") +func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { + // Check "internal" and "vendor" visibility: + if !CanUse(filename, pkg.dir) { + return false + } + + // Speed optimization to minimize disk I/O: + // + // Use the matchesPath heuristic to filter to package paths that could + // reasonably match a dangling reference. + // + // This permits mismatch naming like directory "go-foo" being package "foo", + // or "pkg.v3" being "pkg", or directory + // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but + // doesn't permit a directory "foo" to be package "bar", which is strongly + // discouraged anyway. There's no reason goimports needs to be slow just to + // accommodate that. + for pkgIdent := range refs { + if matchesPath(pkgIdent, pkg.importPathShort) { + return true + } + } + return false +} + +// CanUse reports whether the package in dir is usable from filename, +// respecting the Go "internal" and "vendor" visibility rules. +func CanUse(filename, dir string) bool { + // Fast path check, before any allocations. If it doesn't contain vendor + // or internal, it's not tricky: + // Note that this can false-negative on directories like "notinternal", + // but we check it correctly below. This is just a fast path. + if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. 
+ // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. + absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// matchesPath reports whether ident may match a potential package name +// referred to by path, using heuristics to filter out unidiomatic package +// names. +// +// Specifically, it checks whether either of the last two '/'- or '\'-delimited +// path segments matches the identifier. The segment-matching heuristic must +// allow for various conventions around segment naming, including go-foo, +// foo-go, and foo.v3. To handle all of these, matching considers both (1) the +// entire segment, ignoring '-' and '.', as well as (2) the last subsegment +// separated by '-' or '.'. So the segment foo-go matches all of the following +// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII +// identifiers). +// +// See the docstring for [pkgIsCandidate] for an explanation of how this +// heuristic filters potential candidate packages. +func matchesPath(ident, path string) bool { + // Ignore case, for ASCII. + lowerIfASCII := func(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b + } + + // match reports whether path[start:end] matches ident, ignoring [.-]. + match := func(start, end int) bool { + ii := len(ident) - 1 // current byte in ident + pi := end - 1 // current byte in path + for ; pi >= start && ii >= 0; pi-- { + pb := path[pi] + if pb == '-' || pb == '.' 
{ + continue + } + pb = lowerIfASCII(pb) + ib := lowerIfASCII(ident[ii]) + if pb != ib { + return false + } + ii-- + } + return ii < 0 && pi < start // all bytes matched + } + + // segmentEnd and subsegmentEnd hold the end points of the current segment + // and subsegment intervals. + segmentEnd := len(path) + subsegmentEnd := len(path) + + // Count slashes; we only care about the last two segments. + nslash := 0 + + for i := len(path) - 1; i >= 0; i-- { + switch b := path[i]; b { + // TODO(rfindley): we handle backlashes here only because the previous + // heuristic handled backslashes. This is perhaps overly defensive, but is + // the result of many lessons regarding Chesterton's fence and the + // goimports codebase. + // + // However, this function is only ever called with something called an + // 'importPath'. Is it possible that this is a real import path, and + // therefore we need only consider forward slashes? + case '/', '\\': + if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) { + return true + } + nslash++ + if nslash == 2 { + return false // did not match above + } + segmentEnd, subsegmentEnd = i, i // reset + case '-', '.': + if match(i+1, subsegmentEnd) { + return true + } + subsegmentEnd = i + } + } + return match(0, segmentEnd) || match(0, subsegmentEnd) +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func symbolNameSet(symbols []stdlib.Symbol) map[string]bool { + names := make(map[string]bool) + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Const, stdlib.Var, stdlib.Type, stdlib.Func: + names[sym.Name] = true + } + } + return names +} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go new file mode 100644 index 00000000..b5f5218b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports + +import ( + "bufio" + "bytes" + "context" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" +) + +// Options is golang.org/x/tools/imports.Options with extra internal-only options. +type Options struct { + Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. + LocalPrefix string + + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. 
+func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { + fileSet := token.NewFileSet() + var parserMode parser.Mode + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment) + if err != nil { + return nil, err + } + + if !opt.FormatOnly { + if err := fixImports(fileSet, file, filename, opt.Env); err != nil { + return nil, err + } + } + return formatFile(fileSet, file, src, adjust, opt) +} + +// FixImports returns a list of fixes to the imports that, when applied, +// will leave the imports in the same state as Process. src and opt must +// be specified. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) { + ctx, done := event.Start(ctx, "imports.FixImports") + defer done() + + fileSet := token.NewFileSet() + // TODO(rfindley): these default values for ParseComments and AllErrors were + // extracted from gopls, but are they even needed? + file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true) + if err != nil { + return nil, err + } + + return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source) +} + +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { + // Don't use parse() -- we don't care about fragments or statement lists + // here, and we need to work with unparsable files. 
+ fileSet := token.NewFileSet() + parserMode := parser.SkipObjectResolution + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + parserMode |= extraMode + + file, err := parser.ParseFile(fileSet, filename, src, parserMode) + if file == nil { + return nil, err + } + + // Apply the fixes to the file. + apply(fileSet, file, fixes) + + return formatFile(fileSet, file, src, nil, opt) +} + +// formatFile formats the file syntax tree. +// It may mutate the token.FileSet and the ast.File. +// +// If an adjust function is provided, it is called after formatting +// with the original source (formatFile's src parameter) and the +// formatted file, and returns the postpocessed result. +func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { + mergeImports(file) + sortImports(opt.LocalPrefix, fset.File(file.FileStart), file) + var spacesBefore []string // import paths we need spaces before + for _, impSection := range astutil.Imports(fset, file) { + // Within each block of contiguous imports, see if any + // import lines are in different group numbers. If so, + // we'll need to put a space between them so it's + // compatible with gofmt. 
+ lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(opt.LocalPrefix, importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err := printConfig.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out, err = addImportSpaces(bytes.NewReader(out), spacesBefore) + if err != nil { + return nil, err + } + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. +func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) { + if parserMode&parser.SkipObjectResolution != 0 { + panic("legacy ast.Object resolution is required") + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that parse errors are on + // the correct line. + const prefix = "package main;" + psrc := append([]byte(prefix), src...) 
+ file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // Gofmt will turn the ; into a \n. + // Do that ourselves now and update the file contents, + // so that positions and line numbers are correct going forward. + psrc[len(prefix)-1] = '\n' + fset.File(file.Package).SetLinesForContent(psrc) + + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + src = src[len(prefix):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. + // Remove that indent. + src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n")) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. 
+ return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2. matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3. matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. 
+func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`) + +func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) { + var out bytes.Buffer + in := bufio.NewReader(r) + inImports := false + done := false + for { + s, err := in.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprint(&out, s) + } + return out.Bytes(), nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go new file mode 100644 index 00000000..df94ec81 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -0,0 +1,841 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "slices" + "sort" + "strconv" + "strings" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" +) + +// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning +// as fast as possible, which is desirable for a call to goimports from the +// command line, but it doesn't work as well for gopls, where it suffers from +// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216), +// both caused by populating the cache, albeit in slightly different ways. +// +// A high level list of TODOs: +// - Optimize the scan itself, as there is some redundancy statting and +// reading go.mod files. +// - Invert the relationship between ProcessEnv and Resolver (see the +// docstring of ProcessEnv). +// - Make it easier to use an external resolver implementation. +// +// Smaller TODOs are annotated in the code below. + +// ModuleResolver implements the Resolver interface for a workspace using +// modules. +// +// A goal of the ModuleResolver is to invoke the Go command as little as +// possible. To this end, it runs the Go command only for listing module +// information (i.e. `go list -m -e -json ...`). Package scanning, the process +// of loading package information for the modules, is implemented internally +// via the scan method. +// +// It has two types of state: the state derived from the go command, which +// is populated by init, and the state derived from scans, which is populated +// via scan. A root is considered scanned if it has been walked to discover +// directories. However, if the scan did not require additional information +// from the directory (such as package name or exports), the directory +// information itself may be partially populated. 
It will be lazily filled in +// as needed by scans, using the scanCallback. +type ModuleResolver struct { + env *ProcessEnv + + // Module state, populated during construction + dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory + moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset + roots []gopathwalk.Root // roots to scan, in approximate order of importance + mains []*gocommand.ModuleJSON // main modules + mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots + modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path + modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir. + + // Scanning state, populated by scan + + // scanSema prevents concurrent scans, and guards scannedRoots and the cache + // fields below (though the caches themselves are concurrency safe). + // Receive to acquire, send to release. + scanSema chan struct{} + scannedRoots map[gopathwalk.Root]bool // if true, root has been walked + + // Caches of directory info, populated by scans and scan callbacks + // + // moduleCacheCache stores cached information about roots in the module + // cache, which are immutable and therefore do not need to be invalidated. + // + // otherCache stores information about all other roots (even GOROOT), which + // may change. + moduleCacheCache *DirInfoCache + otherCache *DirInfoCache +} + +// newModuleResolver returns a new module-aware goimports resolver. +// +// Note: use caution when modifying this constructor: changes must also be +// reflected in ModuleResolver.ClearForNewScan. 
+func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} // release + + goenv, err := r.env.goEnv() + if err != nil { + return nil, err + } + + // TODO(rfindley): can we refactor to share logic with r.env.invokeGo? + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + + vendorEnabled := false + var mainModVendor *gocommand.ModuleJSON // for module vendoring + var mainModsVendor []*gocommand.ModuleJSON // for workspace vendoring + + goWork := r.env.Env["GOWORK"] + if len(goWork) == 0 { + // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but + // they should be available from the ProcessEnv. Can we avoid the redundant + // invocation? + vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return nil, err + } + } else { + vendorEnabled, mainModsVendor, err = gocommand.WorkspaceVendorEnabled(context.Background(), inv, r.env.GocmdRunner) + if err != nil { + return nil, err + } + } + + if vendorEnabled { + if mainModVendor != nil { + // Module vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = []*gocommand.ModuleJSON{mainModVendor} + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainModVendor.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + } else { + // Workspace vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. 
+ r.mains = mainModsVendor + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(filepath.Dir(goWork), "vendor"), + } + r.modsByModPath = append(slices.Clone(mainModsVendor), r.dummyVendorMod) + r.modsByDir = append(slices.Clone(mainModsVendor), r.dummyVendorMod) + } + } else { + // Vendor mode is off, so run go list -m ... to find everything. + err := r.initAllMods() + // We expect an error when running outside of a module with + // GO111MODULE=on. Other errors are fatal. + if err != nil { + if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { + return nil, err + } + } + } + + r.moduleCacheDir = gomodcacheForEnv(goenv) + if r.moduleCacheDir == "" { + return nil, fmt.Errorf("cannot resolve GOMODCACHE") + } + + sort.Slice(r.modsByModPath, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByModPath[x].Path, "/") + } + return count(j) < count(i) // descending order + }) + sort.Slice(r.modsByDir, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) + } + return count(j) < count(i) // descending order + }) + + r.roots = []gopathwalk.Root{} + if goenv["GOROOT"] != "" { // "" happens in tests + r.roots = append(r.roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}) + } + r.mainByDir = make(map[string]*gocommand.ModuleJSON) + for _, main := range r.mains { + r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule}) + r.mainByDir[main.Dir] = main + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther}) + } else { + addDep := func(mod *gocommand.ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough + // when we encounter it in the module cache scan. 
+ // + // Including it at a lower index in r.roots than the module cache dir + // helps prioritize matches from within existing dependencies. + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. + for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + // If provided, share the moduleCacheCache. + // + // TODO(rfindley): The module cache is immutable. However, the loaded + // exports do depend on GOOS and GOARCH. Fortunately, the + // ProcessEnv.buildContext does not adjust these from build.DefaultContext + // (even though it should). So for now, this is OK to share, but we need to + // add logic for handling GOOS/GOARCH. + r.moduleCacheCache = moduleCacheCache + r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} + if r.moduleCacheCache == nil { + r.moduleCacheCache = NewDirInfoCache() + } + r.otherCache = NewDirInfoCache() + return r, nil +} + +// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env +// map, which must have GOMODCACHE and GOPATH populated. +// +// TODO(rfindley): this is defensive refactoring. +// 1. Is this even relevant anymore? Can't we just read GOMODCACHE. +// 2. Use this to separate module cache scanning from other scanning. +func gomodcacheForEnv(goenv map[string]string) string { + if gmc := goenv["GOMODCACHE"]; gmc != "" { + // golang/go#67156: ensure that the module cache is clean, since it is + // assumed as a prefix to directories scanned by gopathwalk, which are + // themselves clean. 
+ return filepath.Clean(gmc) + } + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return "" + } + return filepath.Join(gopaths[0], "/pkg/mod") +} + +func (r *ModuleResolver) initAllMods() error { + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...") + if err != nil { + return err + } + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &gocommand.ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return err + } + if mod.Dir == "" { + r.env.logf("module %v has not been downloaded and will be ignored", mod.Path) + // Can't do anything with a module that's not downloaded. + continue + } + // golang/go#36193: the go command doesn't always clean paths. + mod.Dir = filepath.Clean(mod.Dir) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) + if mod.Main { + r.mains = append(r.mains, mod) + } + } + return nil +} + +// ClearForNewScan invalidates the last scan. +// +// It preserves the set of roots, but forgets about the set of directories. +// Though it forgets the set of module cache directories, it remembers their +// contents, since they are assumed to be immutable. +func (r *ModuleResolver) ClearForNewScan() Resolver { + <-r.scanSema // acquire r, to guard scannedRoots + r2 := &ModuleResolver{ + env: r.env, + dummyVendorMod: r.dummyVendorMod, + moduleCacheDir: r.moduleCacheDir, + roots: r.roots, + mains: r.mains, + mainByDir: r.mainByDir, + modsByModPath: r.modsByModPath, + + scanSema: make(chan struct{}, 1), + scannedRoots: make(map[gopathwalk.Root]bool), + otherCache: NewDirInfoCache(), + moduleCacheCache: r.moduleCacheCache, + } + r2.scanSema <- struct{}{} // r2 must start released + // Invalidate root scans. We don't need to invalidate module cache roots, + // because they are immutable. + // (We don't support a use case where GOMODCACHE is cleaned in the middle of + // e.g. a gopls session: the user must restart gopls to get accurate + // imports.) 
+ // + // Scanning for new directories in GOMODCACHE should be handled elsewhere, + // via a call to ScanModuleCache. + for _, root := range r.roots { + if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] { + r2.scannedRoots[root] = true + } + } + r.scanSema <- struct{}{} // release r + return r2 +} + +// ClearModuleInfo invalidates resolver state that depends on go.mod file +// contents (essentially, the output of go list -m -json ...). +// +// Notably, it does not forget directory contents, which are reset +// asynchronously via ClearForNewScan. +// +// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op. +// +// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. +func (e *ProcessEnv) ClearModuleInfo() { + if r, ok := e.resolver.(*ModuleResolver); ok { + resolver, err := newModuleResolver(e, e.ModCache) + if err != nil { + e.resolver = nil + e.resolverErr = err + return + } + + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + + e.UpdateResolver(resolver) + } +} + +// UpdateResolver sets the resolver for the ProcessEnv to use in imports +// operations. Only for use with the result of [Resolver.ClearForNewScan]. +// +// TODO(rfindley): this awkward API is a result of the (arguably) inverted +// relationship between configuration and state described in the doc comment +// for [ProcessEnv]. +func (e *ProcessEnv) UpdateResolver(r Resolver) { + e.resolver = r + e.resolverErr = nil +} + +// findPackage returns the module and directory from within the main modules +// and their dependencies that contains the package at the given import path, +// or returns nil, "" if no module is in scope. +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { + // This can't find packages in the stdlib, but that's harmless for all + // the existing code paths. 
+ for _, m := range r.modsByModPath { + if !strings.HasPrefix(importPath, m.Path) { + continue + } + pathInModule := importPath[len(m.Path):] + pkgDir := filepath.Join(m.Dir, pathInModule) + if r.dirIsNestedModule(pkgDir, m) { + continue + } + + if info, ok := r.cacheLoad(pkgDir); ok { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + if err != nil { + continue // No package in this dir. + } + return m, pkgDir + } + if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil { + continue // Dir is unreadable, etc. + } + // This is slightly wrong: a directory doesn't have to have an + // importable package to count as a package for package-to-module + // resolution. package main or _test files should count but + // don't. + // TODO(heschi): fix this. + if _, err := r.cachePackageName(info); err == nil { + return m, pkgDir + } + } + + // Not cached. Read the filesystem. + pkgFiles, err := os.ReadDir(pkgDir) + if err != nil { + continue + } + // A module only contains a package if it has buildable go + // files in that directory. If not, it could be provided by an + // outer module. See #29736. + for _, fi := range pkgFiles { + if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok { + return m, pkgDir + } + } + } + return nil, "" +} + +func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) { + if info, ok := r.moduleCacheCache.Load(dir); ok { + return info, ok + } + return r.otherCache.Load(dir) +} + +func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { + if info.rootType == gopathwalk.RootModuleCache { + r.moduleCacheCache.Store(info.dir, info) + } else { + r.otherCache.Store(info.dir, info) + } +} + +// cachePackageName caches the package name for a dir already in the cache. 
+func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CachePackageName(info) + } + return r.otherCache.CachePackageName(info) +} + +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CacheExports(ctx, env, info) + } + return r.otherCache.CacheExports(ctx, env, info) +} + +// findModuleByDir returns the module that contains dir, or nil if no such +// module is in scope. +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { + // This is quite tricky and may not be correct. dir could be: + // - a package in the main module. + // - a replace target underneath the main module's directory. + // - a nested module in the above. + // - a replace target somewhere totally random. + // - a nested module in the above. + // - in the mod cache. + // - in /vendor/ in -mod=vendor mode. + // - nested module? Dunno. + // Rumor has it that replace targets cannot contain other replace targets. + // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. + for _, m := range r.modsByDir { + if !strings.HasPrefix(dir, m.Dir) { + continue + } + + if r.dirIsNestedModule(dir, m) { + continue + } + + return m + } + return nil +} + +// dirIsNestedModule reports if dir is contained in a nested module underneath +// mod, not actually in mod. +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { + if !strings.HasPrefix(dir, mod.Dir) { + return false + } + if r.dirInModuleCache(dir) { + // Nested modules in the module cache are pruned, + // so it cannot be a nested module. 
+ return false + } + if mod != nil && mod == r.dummyVendorMod { + // The /vendor pseudomodule is flattened and doesn't actually count. + return false + } + modDir, _ := r.modInfo(dir) + if modDir == "" { + return false + } + return modDir != mod.Dir +} + +func readModName(modFile string) string { + modBytes, err := os.ReadFile(modFile) + if err != nil { + return "" + } + return modulePath(modBytes) +} + +func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) { + if r.dirInModuleCache(dir) { + if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } + } + for { + if info, ok := r.cacheLoad(dir); ok { + return info.moduleDir, info.moduleName + } + f := filepath.Join(dir, "go.mod") + info, err := os.Stat(f) + if err == nil && !info.IsDir() { + return dir, readModName(f) + } + + d := filepath.Dir(dir) + if len(d) >= len(dir) { + return "", "" // reached top of file system, no go.mod + } + dir = d + } +} + +func (r *ModuleResolver) dirInModuleCache(dir string) bool { + if r.moduleCacheDir == "" { + return false + } + return strings.HasPrefix(dir, r.moduleCacheDir) +} + +func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + for _, path := range importPaths { + // TODO(rfindley): shouldn't this use the dirInfoCache? 
+ _, packageDir := r.findPackage(path) + if packageDir == "" { + continue + } + name, err := packageDirToName(packageDir) + if err != nil { + continue + } + names[path] = name + } + return names, nil +} + +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() + + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + if !callback.dirFound(pkg) { + return + } + + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return + } + if !callback.packageNameLoaded(pkg) { + return + } + + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } + + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() + + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. + skip := func(root gopathwalk.Root, dir string) bool { + if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule { + if root.Path == dir { + return false + } + + if r.env.SkipPathInScan(filepath.Clean(dir)) { + return true + } + } + + info, ok := r.cacheLoad(dir) + if !ok { + return false + } + // This directory can be skipped as long as we have already scanned it. + // Packages with errors will continue to have errors, so there is no need + // to rescan them. 
+ packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + add := func(root gopathwalk.Root, dir string) { + r.cacheStore(r.scanDirForPackage(root, dir)) + } + + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: // acquire + } + defer func() { r.scanSema <- struct{}{} }() // release + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } + + if r.scannedRoots[root] { + continue + } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) + r.scannedRoots[root] = true + } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { + if stdlib.HasPackage(path) { + return MaxRelevance + } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} + +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + relevance = MaxRelevance - 3 + case !mod.Main: + relevance = MaxRelevance - 2 + default: + relevance = MaxRelevance - 1 // main module ties with stdlib + } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } + } + + return relevance +} + +// canonicalize gets the 
result of canonicalizing the packages using the results +// of initializing the resolver from 'go list -m'. +func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { + // Packages in GOROOT are already canonical, regardless of the std/cmd modules. + if info.rootType == gopathwalk.RootGOROOT { + return &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + packageName: path.Base(info.nonCanonicalImportPath), + relevance: MaxRelevance, + }, nil + } + + importPath := info.nonCanonicalImportPath + mod := r.findModuleByDir(info.dir) + // Check if the directory is underneath a module that's in scope. + if mod != nil { + // It is. If dir is the target of a replace directive, + // our guessed import path is wrong. Use the real one. + if mod.Dir == info.dir { + importPath = mod.Path + } else { + dirInMod := info.dir[len(mod.Dir)+len("/"):] + importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) + } + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. + return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) + } + + res := &pkg{ + importPathShort: importPath, + dir: info.dir, + relevance: modRelevance(mod), + } + // We may have discovered a package that has a different version + // in scope already. Canonicalize to that one if possible. 
+ if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { + res.dir = canonicalDir + } + return res, nil +} + +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { + return r.cacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { + subdir := "" + if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) { + subdir = dir[len(prefix):] + } + importPath := filepath.ToSlash(subdir) + if strings.HasPrefix(importPath, "vendor/") { + // Only enter vendor directories if they're explicitly requested as a root. + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("unwanted vendor directory"), + } + } + switch root.Type { + case gopathwalk.RootCurrentModule: + importPath = path.Join(r.mainByDir[root.Path].Path, filepath.ToSlash(subdir)) + case gopathwalk.RootModuleCache: + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + r.env.logf("decoding module cache path %q: %v", subdir, err) + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath = path.Join(modPath, filepath.ToSlash(matches[3])) + } + + modDir, modName := r.modInfo(dir) + result := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + if root.Type == gopathwalk.RootGOROOT { + // stdlib packages are always in scope, despite the confusing 
go.mod + return result + } + return result +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// modulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +// +// Copied from cmd/go/internal/modfile. +func modulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go new file mode 100644 index 00000000..b96c9d4b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -0,0 +1,331 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "fmt" + "path" + "path/filepath" + "strings" + "sync" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" +) + +// To find packages to import, the resolver needs to know about all of +// the packages that could be imported. 
This includes packages that are +// already in modules that are in (1) the current module, (2) replace targets, +// and (3) packages in the module cache. Packages in (1) and (2) may change over +// time, as the client may edit the current module and locally replaced modules. +// The module cache (which includes all of the packages in (3)) can only +// ever be added to. +// +// The resolver can thus save state about packages in the module cache +// and guarantee that this will not change over time. To obtain information +// about new modules added to the module cache, the module cache should be +// rescanned. +// +// It is OK to serve information about modules that have been deleted, +// as they do still exist. +// TODO(suzmue): can we share information with the caller about +// what module needs to be downloaded to import this package? + +type directoryPackageStatus int + +const ( + _ directoryPackageStatus = iota + directoryScanned + nameLoaded + exportsLoaded +) + +// directoryPackageInfo holds (possibly incomplete) information about packages +// contained in a given directory. +type directoryPackageInfo struct { + // status indicates the extent to which this struct has been filled in. + status directoryPackageStatus + // err is non-nil when there was an error trying to reach status. + err error + + // Set when status >= directoryScanned. + + // dir is the absolute directory of this package. + dir string + rootType gopathwalk.RootType + // nonCanonicalImportPath is the package's expected import path. It may + // not actually be importable at that path. + nonCanonicalImportPath string + + // Module-related information. + moduleDir string // The directory that is the module root of this dir. + moduleName string // The module name that contains this dir. + + // Set when status >= nameLoaded. + + packageName string // the package name, as declared in the source. + + // Set when status >= exportsLoaded. 
+ // TODO(rfindley): it's hard to see this, but exports depend implicitly on + // the default build context GOOS and GOARCH. + // + // We can make this explicit, and key exports by GOOS, GOARCH. + exports []stdlib.Symbol +} + +// reachedStatus returns true when info has a status at least target and any error associated with +// an attempt to reach target. +func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) { + if info.err == nil { + return info.status >= target, nil + } + if info.status == target { + return true, info.err + } + return true, nil +} + +// DirInfoCache is a concurrency-safe map for storing information about +// directories that may contain packages. +// +// The information in this cache is built incrementally. Entries are initialized in scan. +// No new keys should be added in any other functions, as all directories containing +// packages are identified in scan. +// +// Other functions, including loadExports and findPackage, may update entries in this cache +// as they discover new things about the directory. +// +// The information in the cache is not expected to change for the cache's +// lifetime, so there is no protection against competing writes. Users should +// take care not to hold the cache across changes to the underlying files. +type DirInfoCache struct { + mu sync.Mutex + // dirs stores information about packages in directories, keyed by absolute path. + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +func NewDirInfoCache() *DirInfoCache { + return &DirInfoCache{ + dirs: make(map[string]*directoryPackageInfo), + listeners: make(map[*int]cacheListener), + } +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. 
+func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
+	ctx, cancel := context.WithCancel(ctx)
+
+	// Flushing out all the callbacks is tricky without knowing how many there
+	// are going to be. Setting an arbitrary limit makes it much easier.
+	const maxInFlight = 10
+	sema := make(chan struct{}, maxInFlight)
+	for range maxInFlight {
+		sema <- struct{}{}
+	}
+
+	cookie := new(int) // A unique ID we can use for the listener.
+
+	// We can't hold mu while calling the listener.
+	d.mu.Lock()
+	var keys []string
+	for key := range d.dirs {
+		keys = append(keys, key)
+	}
+	// The registered listener takes a semaphore token around each callback so
+	// that stop (below) can wait for all in-flight callbacks to drain.
+	d.listeners[cookie] = func(info directoryPackageInfo) {
+		select {
+		case <-ctx.Done():
+			return
+		case <-sema:
+		}
+		listener(info)
+		sema <- struct{}{}
+	}
+	d.mu.Unlock()
+
+	// stop cancels the context, deregisters the listener, and then re-acquires
+	// all maxInFlight tokens, blocking until every in-flight callback finishes.
+	stop := func() {
+		cancel()
+		d.mu.Lock()
+		delete(d.listeners, cookie)
+		d.mu.Unlock()
+		for range maxInFlight {
+			<-sema
+		}
+	}
+
+	// Process the pre-existing keys.
+	for _, k := range keys {
+		select {
+		case <-ctx.Done():
+			return stop
+		default:
+		}
+		if v, ok := d.Load(k); ok {
+			listener(v)
+		}
+	}
+
+	return stop
+}
+
+// Store stores the package info for dir.
+func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) {
+	d.mu.Lock()
+	// TODO(rfindley, golang/go#59216): should we overwrite an existing entry?
+	// That seems incorrect as the cache should be idempotent.
+	_, old := d.dirs[dir]
+	d.dirs[dir] = &info
+	// Snapshot the listeners so they can be invoked outside the lock.
+	var listeners []cacheListener
+	for _, l := range d.listeners {
+		listeners = append(listeners, l)
+	}
+	d.mu.Unlock()
+
+	// Notify listeners only for directories not previously present.
+	if !old {
+		for _, l := range listeners {
+			l(info)
+		}
+	}
+}
+
+// Load returns a copy of the directoryPackageInfo for absolute directory dir.
+func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	info, ok := d.dirs[dir]
+	if !ok {
+		return directoryPackageInfo{}, false
+	}
+	return *info, true
+}
+
+// Keys returns the keys currently present in d.
+func (d *DirInfoCache) Keys() (keys []string) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	for key := range d.dirs {
+		keys = append(keys, key)
+	}
+	return keys
+}
+
+// CachePackageName loads (if necessary) and caches the package name declared
+// in info's directory, returning the name and any error hit while loading it.
+func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
+	if loaded, err := info.reachedStatus(nameLoaded); loaded {
+		return info.packageName, err
+	}
+	if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
+		return "", fmt.Errorf("cannot read package name, scan error: %v", err)
+	}
+	info.packageName, info.err = packageDirToName(info.dir)
+	info.status = nameLoaded
+	d.Store(info.dir, info)
+	return info.packageName, info.err
+}
+
+// CacheExports loads (if necessary) and caches the exported symbols of the
+// package in info's directory. Context cancellation and deadline errors are
+// returned immediately without being recorded in the cache.
+func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) {
+	if reached, _ := info.reachedStatus(exportsLoaded); reached {
+		return info.packageName, info.exports, info.err
+	}
+	if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
+		return "", nil, err
+	}
+	info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
+	// NOTE(review): the == comparison misses wrapped context errors; errors.Is
+	// would be more robust — confirm against upstream before changing.
+	if info.err == context.Canceled || info.err == context.DeadlineExceeded {
+		return info.packageName, info.exports, info.err
+	}
+	// The cache structure wants things to proceed linearly. We can skip a
+	// step here, but only if we succeed.
+	if info.status == nameLoaded || info.err == nil {
+		info.status = exportsLoaded
+	} else {
+		info.status = nameLoaded
+	}
+	d.Store(info.dir, info)
+	return info.packageName, info.exports, info.err
+}
+
+// ScanModuleCache walks the given directory, which must be a GOMODCACHE value,
+// for directory package information, storing the results in cache.
+func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) {
+	// Note(rfindley): it's hard to see, but this function attempts to implement
+	// just the side effects on cache of calling PrimeCache with a ProcessEnv
+	// that has the given dir as its GOMODCACHE.
+ // + // Teasing out the control flow, we see that we can avoid any handling of + // vendor/ and can infer module info entirely from the path, simplifying the + // logic here. + + root := gopathwalk.Root{ + Path: filepath.Clean(dir), + Type: gopathwalk.RootModuleCache, + } + + directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo { + // This is a copy of ModuleResolver.scanDirForPackage, trimmed down to + // logic that applies to a module cache directory. + + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if logf != nil { + logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath := path.Join(modPath, filepath.ToSlash(matches[3])) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + modName := readModName(filepath.Join(modDir, "go.mod")) + return directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + } + + add := func(root gopathwalk.Root, dir string) { + info := directoryInfo(root, dir) + cache.Store(info.dir, info) + } + + skip := func(_ gopathwalk.Root, dir string) bool { + // Skip directories that have already been scanned. + // + // Note that gopathwalk only adds "package" directories, which must contain + // a .go file, and all such package directories in the module cache are + // immutable. So if we can load a dir, it can be skipped. 
+ info, ok := cache.Load(dir) + if !ok { + return false + } + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true}) +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go new file mode 100644 index 00000000..67c17bc4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -0,0 +1,298 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hacked up copy of go/ast/import.go +// Modified to use a single token.File in preference to a FileSet. + +package imports + +import ( + "go/ast" + "go/token" + "log" + "slices" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data loss. +// +// It may mutate the token.File and the ast.File. +func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = slices.Delete(f.Decls, i, i+1) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...) 
+		d.Specs = specs
+
+		// Deduping can leave a blank line before the rparen; clean that up.
+		// Ignore line directives.
+		if len(d.Specs) > 0 {
+			lastSpec := d.Specs[len(d.Specs)-1]
+			lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line
+			if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 {
+				tokFile.MergeLine(rParenLine - 1) // has side effects!
+			}
+		}
+	}
+}
+
+// mergeImports merges all the import declarations into the first one.
+// Taken from golang.org/x/tools/ast/astutil.
+// This does not adjust line numbers properly.
+func mergeImports(f *ast.File) {
+	if len(f.Decls) <= 1 {
+		return
+	}
+
+	// Merge all the import declarations into the first one.
+	var first *ast.GenDecl
+	for i := 0; i < len(f.Decls); i++ {
+		decl := f.Decls[i]
+		gen, ok := decl.(*ast.GenDecl)
+		if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
+			continue
+		}
+		if first == nil {
+			first = gen
+			continue // Don't touch the first one.
+		}
+		// We now know there is more than one package in this import
+		// declaration. Ensure that it ends up parenthesized.
+		first.Lparen = first.Pos()
+		// Move the imports of the other import declaration to the first one.
+		for _, spec := range gen.Specs {
+			spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
+			first.Specs = append(first.Specs, spec)
+		}
+		f.Decls = slices.Delete(f.Decls, i, i+1)
+		i--
+	}
+}
+
+// declImports reports whether gen contains an import of path.
+// Taken from golang.org/x/tools/ast/astutil.
+func declImports(gen *ast.GenDecl, path string) bool {
+	if gen.Tok != token.IMPORT {
+		return false
+	}
+	for _, spec := range gen.Specs {
+		impspec := spec.(*ast.ImportSpec)
+		if importPath(impspec) == path {
+			return true
+		}
+	}
+	return false
+}
+
+// importPath returns the unquoted import path of s, or "" if it is malformed.
+func importPath(s ast.Spec) string {
+	t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
+	if err == nil {
+		return t
+	}
+	return ""
+}
+
+// importName returns the local name of s, or "" if none is specified.
+func importName(s ast.Spec) string {
+	n := s.(*ast.ImportSpec).Name
+	if n == nil {
+		return ""
+	}
+	return n.Name
+}
+
+// importComment returns the text of the trailing comment on s, or "".
+func importComment(s ast.Spec) string {
+	c := s.(*ast.ImportSpec).Comment
+	if c == nil {
+		return ""
+	}
+	return c.Text()
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+// That is only safe when both specs share path and name and prev carries
+// no comment that would be lost.
+func collapse(prev, next ast.Spec) bool {
+	if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+		return false
+	}
+	return prev.(*ast.ImportSpec).Comment == nil
+}
+
+// posSpan records the original source range of a single import spec.
+type posSpan struct {
+	Start token.Pos
+	End   token.Pos
+}
+
+// sortSpecs sorts the import specs within each import decl.
+// It may mutate the token.File.
+func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec {
+	// Can't short-circuit here even if specs are already sorted,
+	// since they might yet need deduplication.
+	// A lone import, however, may be safely ignored.
+	if len(specs) <= 1 {
+		return specs
+	}
+
+	// Record positions for specs.
+	pos := make([]posSpan, len(specs))
+	for i, s := range specs {
+		pos[i] = posSpan{s.Pos(), s.End()}
+	}
+
+	// Identify comments in this range.
+	// Any comment from pos[0].Start to the final line counts.
+ lastLine := tokFile.Line(pos[len(pos)-1].End) + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if tokFile.Line(g.End()) > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. + importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec{localPrefix, specs}) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + tokFile.MergeLine(tokFile.Line(p)) // has side effects! + } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + nextSpecPos := pos[i].End + + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + nextSpecPos = c.End() + } + } + if i < len(specs)-1 { + pos[i+1].Start = nextSpecPos + pos[i+1].End = nextSpecPos + } + } + + sort.Sort(byCommentPos(comments)) + + // Fixup comments can insert blank lines, because import specs are on different lines. + // We remove those blank lines here by merging import spec to the first import spec line. 
+ firstSpecLine := tokFile.Line(specs[0].Pos()) + for _, s := range specs[1:] { + p := s.Pos() + line := tokFile.Line(p) + for previousLine := line - 1; previousLine >= firstSpecLine; { + // MergeLine can panic. Avoid the panic at the cost of not removing the blank line + // golang/go#50329 + if previousLine > 0 && previousLine < tokFile.LineCount() { + tokFile.MergeLine(previousLine) // has side effects! + previousLine-- + } else { + // try to gather some data to diagnose how this could happen + req := "Please report what the imports section of your go file looked like." + log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. %s", + firstSpecLine, line, previousLine, tokFile.LineCount(), req) + } + } + } + return specs +} + +type byImportSpec struct { + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec +} + +func (x byImportSpec) Len() int { return len(x.specs) } +func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x.specs[i]) + jpath := importPath(x.specs[j]) + + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x.specs[i]) + jname := importName(x.specs[j]) + + if iname != jname { + return iname < jname + } + return importComment(x.specs[i]) < importComment(x.specs[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go new file mode 100644 index 00000000..cbe4f3c5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -0,0 +1,63 @@ +// Copyright 
2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "context"
+
+// These types document the APIs below.
+//
+// TODO(rfindley): consider making these defined types rather than aliases.
+type (
+	ImportPath  = string
+	PackageName = string
+	Symbol      = string
+
+	// References is the set of references found in a Go file. The first map
+	// key is the left hand side of a selector expression, the second key is
+	// the right hand side, and the value should always be true.
+	References = map[PackageName]map[Symbol]bool
+)
+
+// A Result satisfies a missing import.
+//
+// The Import field describes the missing import spec, and the Package field
+// summarizes the package exports.
+type Result struct {
+	Import  *ImportInfo
+	Package *PackageInfo
+}
+
+// An ImportInfo represents a single import statement.
+type ImportInfo struct {
+	ImportPath string // import path, e.g. "crypto/rand".
+	Name       string // import name, e.g. "crand", or "" if none.
+}
+
+// A PackageInfo represents what's known about a package.
+type PackageInfo struct {
+	Name    string          // package name in the package declaration, if known
+	Exports map[string]bool // set of names of known package-level symbols
+}
+
+// A Source provides imports to satisfy unresolved references in the file being
+// fixed.
+type Source interface {
+	// LoadPackageNames queries PackageName information for the requested import
+	// paths, when operating from the provided srcDir.
+	//
+	// TODO(rfindley): try to refactor to remove this operation.
+	LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
+
+	// ResolveReferences asks the Source for the best package name to satisfy
+	// each of the missing references, in the context of fixing the given
+	// filename.
+ // + // Returns a map from package name to a [Result] for that package name that + // provides the required symbols. Keys may be omitted in the map if no + // candidates satisfy all missing references for that package name. It is up + // to each data source to select the best result for each entry in the + // missing map. + ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go new file mode 100644 index 00000000..ec996c3c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -0,0 +1,129 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "path/filepath" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/internal/gopathwalk" +) + +// ProcessEnvSource implements the [Source] interface using the legacy +// [ProcessEnv] abstraction. +type ProcessEnvSource struct { + env *ProcessEnv + srcDir string + filename string + pkgName string +} + +// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given +// env, to be used for fixing imports in the file with name filename in package +// named pkgName. 
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + return &ProcessEnvSource{ + env: env, + srcDir: srcDir, + filename: filename, + pkgName: pkgName, + }, nil +} + +func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) { + r, err := s.env.GetResolver() + if err != nil { + return nil, err + } + return r.loadPackageNames(unknown, srcDir) +} + +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == s.srcDir && s.pkgName == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !CanUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. 
+ }, + } + resolver, err := s.env.GetResolver() + if err != nil { + return nil, err + } + if err := resolver.scan(ctx, callback); err != nil { + return nil, err + } + + g, ctx := errgroup.WithContext(ctx) + + searcher := symbolSearcher{ + logf: s.env.logf, + srcDir: s.srcDir, + xtest: strings.HasSuffix(s.pkgName, "_test"), + loadExports: resolver.loadExports, + } + + var resultMu sync.Mutex + results := make(map[string]*Result, len(refs)) + for pkgName, symbols := range refs { + g.Go(func() error { + found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) + if err != nil { + return err + } + if found == nil { + return nil // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + pkg := &PackageInfo{ + Name: pkgName, + Exports: symbols, + } + resultMu.Lock() + results[pkgName] = &Result{Import: imp, Package: pkg} + resultMu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + var ans []*Result + for _, x := range results { + ans = append(ans, x) + } + return ans, nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go new file mode 100644 index 00000000..ca745d4a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -0,0 +1,100 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "sync" + "time" + + "golang.org/x/tools/internal/modindex" +) + +// This code is here rather than in the modindex package +// to avoid import loops + +// TODO(adonovan): this code is only used by a test in this package. +// Can we delete it? Or is there a plan to call NewIndexSource from +// cmd/goimports? + +// implements Source using modindex, so only for module cache. +// +// this is perhaps over-engineered. 
A new Index is read at first use. +// And then Update is called after every 15 minutes, and a new Index +// is read if the index changed. It is not clear the Mutex is needed. +type IndexSource struct { + modcachedir string + mu sync.Mutex + index *modindex.Index // (access via getIndex) + expires time.Time +} + +// create a new Source. Called from NewView in cache/session.go. +func NewIndexSource(cachedir string) *IndexSource { + return &IndexSource{modcachedir: cachedir} +} + +func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) { + /// This is used by goimports to resolve the package names of imports of the + // current package, which is irrelevant for the module cache. + return nil, nil +} + +func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { + index, err := s.getIndex() + if err != nil { + return nil, err + } + var cs []modindex.Candidate + for pkg, nms := range missing { + for nm := range nms { + x := index.Lookup(pkg, nm, false) + cs = append(cs, x...) + } + } + found := make(map[string]*Result) + for _, c := range cs { + var x *Result + if x = found[c.ImportPath]; x == nil { + x = &Result{ + Import: &ImportInfo{ + ImportPath: c.ImportPath, + Name: "", + }, + Package: &PackageInfo{ + Name: c.PkgName, + Exports: make(map[string]bool), + }, + } + found[c.ImportPath] = x + } + x.Package.Exports[c.Name] = true + } + var ans []*Result + for _, x := range found { + ans = append(ans, x) + } + return ans, nil +} + +func (s *IndexSource) getIndex() (*modindex.Index, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // (s.index = nil => s.expires is zero, + // so the first condition is strictly redundant. + // But it makes the postcondition very clear.) 
+ if s.index == nil || time.Now().After(s.expires) { + index, err := modindex.Update(s.modcachedir) + if err != nil { + return nil, err + } + s.index = index + s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period) + } + // Inv: s.index != nil + + return s.index, nil +} diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go new file mode 100644 index 00000000..9a963744 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + "time" + + "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gopathwalk" +) + +type directory struct { + path string // relative to GOMODCACHE + importPath string + version string // semantic version +} + +// bestDirByImportPath returns the best directory for each import +// path, where "best" means most recent semantic version. These import +// paths are inferred from the GOMODCACHE-relative dir names in dirs. +func bestDirByImportPath(dirs []string) (map[string]directory, error) { + dirsByPath := make(map[string]directory) + for _, dir := range dirs { + importPath, version, err := dirToImportPathVersion(dir) + if err != nil { + return nil, err + } + new := directory{ + path: dir, + importPath: importPath, + version: version, + } + if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 { + dirsByPath[importPath] = new + } + } + return dirsByPath, nil +} + +// compareDirectory defines an ordering of path@version directories, +// by descending version, then by ascending path. 
+func compareDirectory(x, y directory) int { + if sign := -semver.Compare(x.version, y.version); sign != 0 { + return sign // latest first + } + return strings.Compare(string(x.path), string(y.path)) +} + +// modCacheRegexp splits a relpathpath into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +// dirToImportPathVersion computes import path and semantic version +// from a GOMODCACHE-relative directory name. +func dirToImportPathVersion(dir string) (string, string, error) { + m := modCacheRegexp.FindStringSubmatch(string(dir)) + // m[1] is the module path + // m[2] is the version major.minor.patch(-
 1 && flds[1][1] == 'D',
+			}
+			if px.Type == Func {
+				n, err := strconv.Atoi(flds[2])
+				if err != nil {
+					continue // should never happen
+				}
+				px.Results = int16(n)
+				if len(flds) >= 4 {
+					sig := strings.Split(flds[3], " ")
+					for i := range sig {
+						// $ cannot otherwise occur. removing the spaces
+						// almost works, but for chan struct{}, e.g.
+						sig[i] = strings.Replace(sig[i], "$", " ", -1)
+					}
+					px.Sig = toFields(sig)
+				}
+			}
+			ans = append(ans, px)
+		}
+	}
+	return ans
+}
+
+func toFields(sig []string) []Field {
+	ans := make([]Field, len(sig)/2)
+	for i := range ans {
+		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
+	}
+	return ans
+}
+
+// benchmarks show this is measurably better than strings.Split
+// split into first 4 fields separated by single space
+func fastSplit(x string) []string {
+	ans := make([]string, 0, 4)
+	nxt := 0
+	start := 0
+	for i := 0; i < len(x); i++ {
+		if x[i] != ' ' {
+			continue
+		}
+		ans = append(ans, x[start:i])
+		nxt++
+		start = i + 1
+		if nxt >= 3 {
+			break
+		}
+	}
+	ans = append(ans, x[start:])
+	return ans
+}
+
+func asLexType(c byte) LexType {
+	switch c {
+	case 'C':
+		return Const
+	case 'V':
+		return Var
+	case 'T':
+		return Type
+	case 'F':
+		return Func
+	}
+	return -1
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
new file mode 100644
index 00000000..5fa285d9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
@@ -0,0 +1,119 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modindex contains code for building and searching an
+// [Index] of the Go module cache.
+package modindex
+
+// The directory containing the index, returned by
+// [IndexDir], contains a file index-name- that contains the name
+// of the current index. We believe writing that short file is atomic.
+// [Read] reads that file to get the file name of the index.
+// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-.
+// ( stands for the CurrentVersion of the index format.)
+
+import (
+	"maps"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/semver"
+)
+
+// Update updates the index for the specified Go
+// module cache directory, creating it as needed.
+// On success it returns the current index.
+func Update(gomodcache string) (*Index, error) {
+	prev, err := Read(gomodcache)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+		prev = nil
+	}
+	return update(gomodcache, prev)
+}
+
+// update builds, writes, and returns the current index.
+//
+// If old is nil, the new index is built from all of GOMODCACHE;
+// otherwise it is built from the old index plus cache updates
+// since the previous index's time.
+func update(gomodcache string, old *Index) (*Index, error) {
+	gomodcache, err := filepath.Abs(gomodcache)
+	if err != nil {
+		return nil, err
+	}
+	new, changed, err := build(gomodcache, old)
+	if err != nil {
+		return nil, err
+	}
+	if old == nil || changed {
+		if err := write(gomodcache, new); err != nil {
+			return nil, err
+		}
+	}
+	return new, nil
+}
+
+// build returns a new index for the specified Go module cache (an
+// absolute path).
+//
+// If an old index is provided, only directories more recent than it
+// that it are scanned; older directories are provided by the old
+// Index.
+//
+// The boolean result indicates whether new entries were found.
+func build(gomodcache string, old *Index) (*Index, bool, error) {
+	// Set the time window.
+	var start time.Time // = dawn of time
+	if old != nil {
+		start = old.ValidAt
+	}
+	now := time.Now()
+	end := now.Add(24 * time.Hour) // safely in the future
+
+	// Enumerate GOMODCACHE package directories.
+	// Choose the best (latest) package for each import path.
+	pkgDirs := findDirs(gomodcache, start, end)
+	dirByPath, err := bestDirByImportPath(pkgDirs)
+	if err != nil {
+		return nil, false, err
+	}
+
+	// For each import path it might occur only in
+	// dirByPath, only in old, or in both.
+	// If both, use the semantically later one.
+	var entries []Entry
+	if old != nil {
+		for _, entry := range old.Entries {
+			dir, ok := dirByPath[entry.ImportPath]
+			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
+				// New dir is missing or not more recent; use old entry.
+				entries = append(entries, entry)
+				delete(dirByPath, entry.ImportPath)
+			}
+		}
+	}
+
+	// Extract symbol information for all the new directories.
+	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
+	entries = append(entries, newEntries...)
+	slices.SortFunc(entries, func(x, y Entry) int {
+		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
+			return n
+		}
+		return strings.Compare(x.ImportPath, y.ImportPath)
+	})
+
+	return &Index{
+		GOMODCACHE: gomodcache,
+		ValidAt:    now, // time before the directories were scanned
+		Entries:    entries,
+	}, len(newEntries) > 0, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
new file mode 100644
index 00000000..8e9702d8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -0,0 +1,244 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"iter"
+	"os"
+	"path/filepath"
+	"runtime"
+	"slices"
+	"strings"
+	"sync"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// The name of a symbol contains information about the symbol:
+//  T for types, TD if the type is deprecated
+//  C for consts, CD if the const is deprecated
+//  V for vars, VD if the var is deprecated
+// and for funcs:  F  ( )*
+// any spaces in  are replaced by $s so that the fields
+// of the name are space separated. F is replaced by FD if the func
+// is deprecated.
+type symbol struct {
+	pkg  string // name of the symbols's package
+	name string // declared name
+	kind string // T, C, V, or F, followed by D if deprecated
+	sig  string // signature information, for F
+}
+
+// extractSymbols returns a (new, unordered) array of Entries, one for
+// each provided package directory, describing its exported symbols.
+func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
+	var (
+		mu      sync.Mutex
+		entries []Entry
+	)
+
+	var g errgroup.Group
+	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
+	for dir := range dirs {
+		g.Go(func() error {
+			thedir := filepath.Join(cwd, string(dir.path))
+			mode := parser.SkipObjectResolution | parser.ParseComments
+
+			// Parse all Go files in dir and extract symbols.
+			dirents, err := os.ReadDir(thedir)
+			if err != nil {
+				return nil // log this someday?
+			}
+			var syms []symbol
+			for _, dirent := range dirents {
+				if !strings.HasSuffix(dirent.Name(), ".go") ||
+					strings.HasSuffix(dirent.Name(), "_test.go") {
+					continue
+				}
+				fname := filepath.Join(thedir, dirent.Name())
+				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
+				if err != nil {
+					continue // ignore errors, someday log them?
+				}
+				syms = append(syms, getFileExports(tr)...)
+			}
+
+			// Create an entry for the package.
+			pkg, names := processSyms(syms)
+			if pkg != "" {
+				mu.Lock()
+				defer mu.Unlock()
+				entries = append(entries, Entry{
+					PkgName:    pkg,
+					Dir:        dir.path,
+					ImportPath: dir.importPath,
+					Version:    dir.version,
+					Names:      names,
+				})
+			}
+
+			return nil
+		})
+	}
+	g.Wait() // ignore error
+
+	return entries
+}
+
+func getFileExports(f *ast.File) []symbol {
+	pkg := f.Name.Name
+	if pkg == "main" || pkg == "" {
+		return nil
+	}
+	var ans []symbol
+	// should we look for //go:build ignore?
+	for _, decl := range f.Decls {
+		switch decl := decl.(type) {
+		case *ast.FuncDecl:
+			if decl.Recv != nil {
+				// ignore methods, as we are completing package selections
+				continue
+			}
+			name := decl.Name.Name
+			dtype := decl.Type
+			// not looking at dtype.TypeParams. That is, treating
+			// generic functions just like non-generic ones.
+			sig := dtype.Params
+			kind := "F"
+			if isDeprecated(decl.Doc) {
+				kind += "D"
+			}
+			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
+			for _, x := range sig.List {
+				// This code creates a string representing the type.
+				// TODO(pjw): it may be fragile:
+				// 1. x.Type could be nil, perhaps in ill-formed code
+				// 2. ExprString might someday change incompatibly to
+				//    include struct tags, which can be arbitrary strings
+				if x.Type == nil {
+					// Can this happen without a parse error? (Files with parse
+					// errors are ignored in getSymbols)
+					continue // maybe report this someday
+				}
+				tp := types.ExprString(x.Type)
+				if len(tp) == 0 {
+					// Can this happen?
+					continue // maybe report this someday
+				}
+				// This is only safe if ExprString never returns anything with a $
+				// The only place a $ can occur seems to be in a struct tag, which
+				// can be an arbitrary string literal, and ExprString does not presently
+				// print struct tags. So for this to happen the type of a formal parameter
+				// has to be a explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
+				// would have to show the struct tag. Even testing for this case seems
+				// a waste of effort, but let's remember the possibility
+				if strings.Contains(tp, "$") {
+					continue
+				}
+				tp = strings.Replace(tp, " ", "$", -1)
+				if len(x.Names) == 0 {
+					result = append(result, "_")
+					result = append(result, tp)
+				} else {
+					for _, y := range x.Names {
+						result = append(result, y.Name)
+						result = append(result, tp)
+					}
+				}
+			}
+			sigs := strings.Join(result, " ")
+			if s := newsym(pkg, name, kind, sigs); s != nil {
+				ans = append(ans, *s)
+			}
+		case *ast.GenDecl:
+			depr := isDeprecated(decl.Doc)
+			switch decl.Tok {
+			case token.CONST, token.VAR:
+				tp := "V"
+				if decl.Tok == token.CONST {
+					tp = "C"
+				}
+				if depr {
+					tp += "D"
+				}
+				for _, sp := range decl.Specs {
+					for _, x := range sp.(*ast.ValueSpec).Names {
+						if s := newsym(pkg, x.Name, tp, ""); s != nil {
+							ans = append(ans, *s)
+						}
+					}
+				}
+			case token.TYPE:
+				tp := "T"
+				if depr {
+					tp += "D"
+				}
+				for _, sp := range decl.Specs {
+					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
+						ans = append(ans, *s)
+					}
+				}
+			}
+		}
+	}
+	return ans
+}
+
+func newsym(pkg, name, kind, sig string) *symbol {
+	if len(name) == 0 || !ast.IsExported(name) {
+		return nil
+	}
+	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
+	return &sym
+}
+
+func isDeprecated(doc *ast.CommentGroup) bool {
+	if doc == nil {
+		return false
+	}
+	// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
+	// This code fails for /* Deprecated: */, but it's the code from
+	// gopls/internal/analysis/deprecated
+	for line := range strings.SplitSeq(doc.Text(), "\n\n") {
+		if strings.HasPrefix(line, "Deprecated:") {
+			return true
+		}
+	}
+	return false
+}
+
+// return the package name and the value for the symbols.
+// if there are multiple packages, choose one arbitrarily
+// the returned slice is sorted lexicographically
+func processSyms(syms []symbol) (string, []string) {
+	if len(syms) == 0 {
+		return "", nil
+	}
+	slices.SortFunc(syms, func(l, r symbol) int {
+		return strings.Compare(l.name, r.name)
+	})
+	pkg := syms[0].pkg
+	var names []string
+	for _, s := range syms {
+		if s.pkg != pkg {
+			// Symbols came from two files in same dir
+			// with different package declarations.
+			continue
+		}
+		var nx string
+		if s.sig != "" {
+			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
+		} else {
+			nx = fmt.Sprintf("%s %s", s.name, s.kind)
+		}
+		names = append(names, nx)
+	}
+	return pkg, names
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 00000000..581784da
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,519 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+type pkginfo struct {
+	name string
+	deps string // list of indices of dependencies, as varint-encoded deltas
+}
+
+var deps = [...]pkginfo{
+	{"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
+	{"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
+	{"bufio", "\x03n\x84\x01D\x14"},
+	{"bytes", "q*Z\x03\fG\x02\x02"},
+	{"cmp", ""},
+	{"compress/bzip2", "\x02\x02\xf1\x01A"},
+	{"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"},
+	{"compress/gzip", "\x02\x04d\a\x03\x14mT"},
+	{"compress/lzw", "\x02o\x03\x81\x01"},
+	{"compress/zlib", "\x02\x04d\a\x03\x12\x01n"},
+	{"container/heap", "\xb7\x02"},
+	{"container/list", ""},
+	{"container/ring", ""},
+	{"context", "q[o\x01\r"},
+	{"crypto", "\x86\x01oC"},
+	{"crypto/aes", "\x10\n\t\x95\x02"},
+	{"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"},
+	{"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"},
+	{"crypto/dsa", "D\x04)\x84\x01\r"},
+	{"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"},
+	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"},
+	{"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"},
+	{"crypto/elliptic", "2?\x84\x01\r9"},
+	{"crypto/fips140", "\"\x05"},
+	{"crypto/hkdf", "/\x14\x01-\x15"},
+	{"crypto/hmac", "\x1a\x16\x13\x01\x111"},
+	{"crypto/internal/boring", "\x0e\x02\ri"},
+	{"crypto/internal/boring/bbig", "\x1a\xe8\x01M"},
+	{"crypto/internal/boring/bcache", "\xbc\x02\x13"},
+	{"crypto/internal/boring/sig", ""},
+	{"crypto/internal/constanttime", ""},
+	{"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
+	{"crypto/internal/entropy", "I"},
+	{"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"},
+	{"crypto/internal/fips140", "A0\xbd\x01\v\x16"},
+	{"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"},
+	{"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"},
+	{"crypto/internal/fips140/alias", "\xcf\x02"},
+	{"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"},
+	{"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"},
+	{"crypto/internal/fips140/check/checktest", "'\x87\x02!"},
+	{"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"},
+	{"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"},
+	{"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"},
+	{"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"},
+	{"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"},
+	{"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"},
+	{"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"},
+	{"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"},
+	{"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"},
+	{"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"},
+	{"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"},
+	{"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"},
+	{"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"},
+	{"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"},
+	{"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"},
+	{"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"},
+	{"crypto/internal/fips140/ssh", "'_"},
+	{"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"},
+	{"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"},
+	{"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"},
+	{"crypto/internal/fips140cache", "\xae\x02\r&"},
+	{"crypto/internal/fips140deps", ""},
+	{"crypto/internal/fips140deps/byteorder", "\x9c\x01"},
+	{"crypto/internal/fips140deps/cpu", "\xb1\x01\a"},
+	{"crypto/internal/fips140deps/godebug", "\xb9\x01"},
+	{"crypto/internal/fips140deps/time", "\xc9\x02"},
+	{"crypto/internal/fips140hash", "7\x1c3\xc9\x01"},
+	{"crypto/internal/fips140only", ")\r\x01\x01N3<"},
+	{"crypto/internal/fips140test", ""},
+	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"},
+	{"crypto/internal/impl", "\xb9\x02"},
+	{"crypto/internal/randutil", "\xf5\x01\x12"},
+	{"crypto/internal/sysrand", "qo! \r\r\x01\x01\f\x06"},
+	{"crypto/internal/sysrand/internal/seccomp", "q"},
+	{"crypto/md5", "\x0e6-\x15\x16h"},
+	{"crypto/mlkem", "1"},
+	{"crypto/pbkdf2", "4\x0f\x01-\x15"},
+	{"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"},
+	{"crypto/rc4", "%\x1f-\xc7\x01"},
+	{"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"},
+	{"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"},
+	{"crypto/sha256", "\x0e\f\x1cP"},
+	{"crypto/sha3", "\x0e)O\xc9\x01"},
+	{"crypto/sha512", "\x0e\f\x1eN"},
+	{"crypto/subtle", "\x1e\x1c\x9c\x01X"},
+	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
+	{"crypto/tls/internal/fips140tls", "\x17\xa5\x02"},
+	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
+	{"crypto/x509/pkix", "g\x06\a\x8e\x01G"},
+	{"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
+	{"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"},
+	{"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"},
+	{"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"},
+	{"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"},
+	{"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"},
+	{"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"},
+	{"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"},
+	{"debug/plan9obj", "j\a\x03e\x1c,"},
+	{"embed", "q*A\x19\x01S"},
+	{"embed/internal/embedtest", ""},
+	{"encoding", ""},
+	{"encoding/ascii85", "\xf5\x01C"},
+	{"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"},
+	{"encoding/base32", "\xf5\x01A\x02"},
+	{"encoding/base64", "\x9c\x01YA\x02"},
+	{"encoding/binary", "q\x84\x01\f(\r\x05"},
+	{"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"},
+	{"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
+	{"encoding/hex", "q\x03\x81\x01A\x03"},
+	{"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
+	{"encoding/pem", "\x03f\b\x84\x01A\x03"},
+	{"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"},
+	{"errors", "\xcc\x01\x83\x01"},
+	{"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"},
+	{"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"},
+	{"fmt", "qE&\x19\f \b\r\x02\x03\x12"},
+	{"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"},
+	{"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
+	{"go/build/constraint", "q\xc7\x01\x01\x12\x02"},
+	{"go/constant", "t\x0f~\x01\x024\x01\x02\x12"},
+	{"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"},
+	{"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"},
+	{"go/format", "\x03q\x01\v\x01\x02rD"},
+	{"go/importer", "v\a\x01\x01\x04\x01q9"},
+	{"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"},
+	{"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"},
+	{"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"},
+	{"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"},
+	{"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"},
+	{"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"},
+	{"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"},
+	{"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
+	{"go/version", "\xbe\x01{"},
+	{"hash", "\xf5\x01"},
+	{"hash/adler32", "q\x15\x16"},
+	{"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"},
+	{"hash/crc64", "q\x15\x16\x9f\x01"},
+	{"hash/fnv", "q\x15\x16h"},
+	{"hash/maphash", "\x86\x01\x11<|"},
+	{"html", "\xb9\x02\x02\x12"},
+	{"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
+	{"image", "\x02o\x1ef\x0f4\x03\x01"},
+	{"image/color", ""},
+	{"image/color/palette", "\x8f\x01"},
+	{"image/draw", "\x8e\x01\x01\x04"},
+	{"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"},
+	{"image/internal/imageutil", "\x8e\x01"},
+	{"image/jpeg", "\x02o\x1d\x01\x04b"},
+	{"image/png", "\x02\aa\n\x12\x02\x06\x01fC"},
+	{"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"},
+	{"internal/abi", "\xb8\x01\x97\x01"},
+	{"internal/asan", "\xcf\x02"},
+	{"internal/bisect", "\xae\x02\r\x01"},
+	{"internal/buildcfg", "tGf\x06\x02\x05\n\x01"},
+	{"internal/bytealg", "\xb1\x01\x9e\x01"},
+	{"internal/byteorder", ""},
+	{"internal/cfg", ""},
+	{"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"},
+	{"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"},
+	{"internal/copyright", ""},
+	{"internal/coverage", ""},
+	{"internal/coverage/calloc", ""},
+	{"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"},
+	{"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"},
+	{"internal/coverage/cmerge", "t-`"},
+	{"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"},
+	{"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"},
+	{"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"},
+	{"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."},
+	{"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"},
+	{"internal/coverage/rtcov", "\xcf\x02"},
+	{"internal/coverage/slicereader", "j\n\x81\x01Z"},
+	{"internal/coverage/slicewriter", "t\x81\x01"},
+	{"internal/coverage/stringtab", "t8\x04E"},
+	{"internal/coverage/test", ""},
+	{"internal/coverage/uleb128", ""},
+	{"internal/cpu", "\xcf\x02"},
+	{"internal/dag", "\x04p\xc2\x01\x03"},
+	{"internal/diff", "\x03q\xc3\x01\x02"},
+	{"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"},
+	{"internal/filepathlite", "q*A\x1a@"},
+	{"internal/fmtsort", "\x04\xa5\x02\r"},
+	{"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
+	{"internal/goarch", ""},
+	{"internal/godebug", "\x99\x01!\x81\x01\x01\x13"},
+	{"internal/godebugs", ""},
+	{"internal/goexperiment", ""},
+	{"internal/goos", ""},
+	{"internal/goroot", "\xa1\x02\x01\x05\x12\x02"},
+	{"internal/gover", "\x04"},
+	{"internal/goversion", ""},
+	{"internal/lazyregexp", "\xa1\x02\v\r\x02"},
+	{"internal/lazytemplate", "\xf5\x01,\x18\x02\f"},
+	{"internal/msan", "\xcf\x02"},
+	{"internal/nettrace", ""},
+	{"internal/obscuretestdata", "i\x8c\x01,"},
+	{"internal/oserror", "q"},
+	{"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"},
+	{"internal/platform", ""},
+	{"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"},
+	{"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"},
+	{"internal/profilerecord", ""},
+	{"internal/race", "\x97\x01\xb8\x01"},
+	{"internal/reflectlite", "\x97\x01!:\x16"},
+	{"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"},
+	{"weak", "\x97\x01\x97\x01!"},
+}
+
+// bootstrap is the list of bootstrap packages extracted from cmd/dist.
+var bootstrap = map[string]bool{
+	"cmp":                                     true,
+	"cmd/asm":                                 true,
+	"cmd/asm/internal/arch":                   true,
+	"cmd/asm/internal/asm":                    true,
+	"cmd/asm/internal/flags":                  true,
+	"cmd/asm/internal/lex":                    true,
+	"cmd/cgo":                                 true,
+	"cmd/compile":                             true,
+	"cmd/compile/internal/abi":                true,
+	"cmd/compile/internal/abt":                true,
+	"cmd/compile/internal/amd64":              true,
+	"cmd/compile/internal/arm":                true,
+	"cmd/compile/internal/arm64":              true,
+	"cmd/compile/internal/base":               true,
+	"cmd/compile/internal/bitvec":             true,
+	"cmd/compile/internal/compare":            true,
+	"cmd/compile/internal/coverage":           true,
+	"cmd/compile/internal/deadlocals":         true,
+	"cmd/compile/internal/devirtualize":       true,
+	"cmd/compile/internal/dwarfgen":           true,
+	"cmd/compile/internal/escape":             true,
+	"cmd/compile/internal/gc":                 true,
+	"cmd/compile/internal/importer":           true,
+	"cmd/compile/internal/inline":             true,
+	"cmd/compile/internal/inline/inlheur":     true,
+	"cmd/compile/internal/inline/interleaved": true,
+	"cmd/compile/internal/ir":                 true,
+	"cmd/compile/internal/liveness":           true,
+	"cmd/compile/internal/logopt":             true,
+	"cmd/compile/internal/loong64":            true,
+	"cmd/compile/internal/loopvar":            true,
+	"cmd/compile/internal/mips":               true,
+	"cmd/compile/internal/mips64":             true,
+	"cmd/compile/internal/noder":              true,
+	"cmd/compile/internal/objw":               true,
+	"cmd/compile/internal/pgoir":              true,
+	"cmd/compile/internal/pkginit":            true,
+	"cmd/compile/internal/ppc64":              true,
+	"cmd/compile/internal/rangefunc":          true,
+	"cmd/compile/internal/reflectdata":        true,
+	"cmd/compile/internal/riscv64":            true,
+	"cmd/compile/internal/rttype":             true,
+	"cmd/compile/internal/s390x":              true,
+	"cmd/compile/internal/ssa":                true,
+	"cmd/compile/internal/ssagen":             true,
+	"cmd/compile/internal/staticdata":         true,
+	"cmd/compile/internal/staticinit":         true,
+	"cmd/compile/internal/syntax":             true,
+	"cmd/compile/internal/test":               true,
+	"cmd/compile/internal/typebits":           true,
+	"cmd/compile/internal/typecheck":          true,
+	"cmd/compile/internal/types":              true,
+	"cmd/compile/internal/types2":             true,
+	"cmd/compile/internal/walk":               true,
+	"cmd/compile/internal/wasm":               true,
+	"cmd/compile/internal/x86":                true,
+	"cmd/internal/archive":                    true,
+	"cmd/internal/bio":                        true,
+	"cmd/internal/codesign":                   true,
+	"cmd/internal/dwarf":                      true,
+	"cmd/internal/edit":                       true,
+	"cmd/internal/gcprog":                     true,
+	"cmd/internal/goobj":                      true,
+	"cmd/internal/hash":                       true,
+	"cmd/internal/macho":                      true,
+	"cmd/internal/obj":                        true,
+	"cmd/internal/obj/arm":                    true,
+	"cmd/internal/obj/arm64":                  true,
+	"cmd/internal/obj/loong64":                true,
+	"cmd/internal/obj/mips":                   true,
+	"cmd/internal/obj/ppc64":                  true,
+	"cmd/internal/obj/riscv":                  true,
+	"cmd/internal/obj/s390x":                  true,
+	"cmd/internal/obj/wasm":                   true,
+	"cmd/internal/obj/x86":                    true,
+	"cmd/internal/objabi":                     true,
+	"cmd/internal/par":                        true,
+	"cmd/internal/pgo":                        true,
+	"cmd/internal/pkgpath":                    true,
+	"cmd/internal/quoted":                     true,
+	"cmd/internal/src":                        true,
+	"cmd/internal/sys":                        true,
+	"cmd/internal/telemetry":                  true,
+	"cmd/internal/telemetry/counter":          true,
+	"cmd/link":                                true,
+	"cmd/link/internal/amd64":                 true,
+	"cmd/link/internal/arm":                   true,
+	"cmd/link/internal/arm64":                 true,
+	"cmd/link/internal/benchmark":             true,
+	"cmd/link/internal/dwtest":                true,
+	"cmd/link/internal/ld":                    true,
+	"cmd/link/internal/loadelf":               true,
+	"cmd/link/internal/loader":                true,
+	"cmd/link/internal/loadmacho":             true,
+	"cmd/link/internal/loadpe":                true,
+	"cmd/link/internal/loadxcoff":             true,
+	"cmd/link/internal/loong64":               true,
+	"cmd/link/internal/mips":                  true,
+	"cmd/link/internal/mips64":                true,
+	"cmd/link/internal/ppc64":                 true,
+	"cmd/link/internal/riscv64":               true,
+	"cmd/link/internal/s390x":                 true,
+	"cmd/link/internal/sym":                   true,
+	"cmd/link/internal/wasm":                  true,
+	"cmd/link/internal/x86":                   true,
+	"compress/flate":                          true,
+	"compress/zlib":                           true,
+	"container/heap":                          true,
+	"debug/dwarf":                             true,
+	"debug/elf":                               true,
+	"debug/macho":                             true,
+	"debug/pe":                                true,
+	"go/build/constraint":                     true,
+	"go/constant":                             true,
+	"go/version":                              true,
+	"internal/abi":                            true,
+	"internal/coverage":                       true,
+	"cmd/internal/cov/covcmd":                 true,
+	"internal/bisect":                         true,
+	"internal/buildcfg":                       true,
+	"internal/exportdata":                     true,
+	"internal/goarch":                         true,
+	"internal/godebugs":                       true,
+	"internal/goexperiment":                   true,
+	"internal/goroot":                         true,
+	"internal/gover":                          true,
+	"internal/goversion":                      true,
+	"internal/lazyregexp":                     true,
+	"internal/pkgbits":                        true,
+	"internal/platform":                       true,
+	"internal/profile":                        true,
+	"internal/race":                           true,
+	"internal/runtime/gc":                     true,
+	"internal/saferio":                        true,
+	"internal/syscall/unix":                   true,
+	"internal/types/errors":                   true,
+	"internal/unsafeheader":                   true,
+	"internal/xcoff":                          true,
+	"internal/zstd":                           true,
+	"math/bits":                               true,
+	"sort":                                    true,
+}
+
+// BootstrapVersion is the minor version of Go used during toolchain
+// bootstrapping. Packages for which [IsBootstrapPackage] must not use
+// features of Go newer than this version.
+const BootstrapVersion = Version(24) // go1.24.6
diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go
new file mode 100644
index 00000000..8ecc672b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/import.go
@@ -0,0 +1,97 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stdlib
+
+// This file provides the API for the import graph of the standard library.
+//
+// Be aware that the compiler-generated code for every package
+// implicitly depends on package "runtime" and a handful of others
+// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
+
+import (
+	"encoding/binary"
+	"iter"
+	"slices"
+	"strings"
+)
+
+// Imports returns the sequence of packages directly imported by the
+// named standard packages, in name order.
+// The imports of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Imports(pkgs ...string) iter.Seq[string] {
+	return func(yield func(string) bool) {
+		for _, pkg := range pkgs {
+			if i, ok := find(pkg); ok {
+				var depIndex uint64
+				for data := []byte(deps[i].deps); len(data) > 0; {
+					delta, n := binary.Uvarint(data)
+					depIndex += delta
+					if !yield(deps[depIndex].name) {
+						return
+					}
+					data = data[n:]
+				}
+			}
+		}
+	}
+}
+
+// Dependencies returns the set of all dependencies of the named
+// standard packages, including the initial package,
+// in a deterministic topological order.
+// The dependencies of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Dependencies(pkgs ...string) iter.Seq[string] {
+	return func(yield func(string) bool) {
+		for _, pkg := range pkgs {
+			if i, ok := find(pkg); ok {
+				var seen [1 + len(deps)/8]byte // bit set of seen packages
+				var visit func(i int) bool
+				visit = func(i int) bool {
+					bit := byte(1) << (i % 8)
+					if seen[i/8]&bit == 0 {
+						seen[i/8] |= bit
+						var depIndex uint64
+						for data := []byte(deps[i].deps); len(data) > 0; {
+							delta, n := binary.Uvarint(data)
+							depIndex += delta
+							if !visit(int(depIndex)) {
+								return false
+							}
+							data = data[n:]
+						}
+						if !yield(deps[i].name) {
+							return false
+						}
+					}
+					return true
+				}
+				if !visit(i) {
+					return
+				}
+			}
+		}
+	}
+}
+
+// find returns the index of pkg in the deps table.
+func find(pkg string) (int, bool) {
+	return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
+		return strings.Compare(p.name, n)
+	})
+}
+
+// IsBootstrapPackage reports whether pkg is one of the low-level
+// packages in the Go distribution that must compile with the older
+// language version specified by [BootstrapVersion] during toolchain
+// bootstrapping; see golang.org/s/go15bootstrap.
+func IsBootstrapPackage(pkg string) bool {
+	return bootstrap[pkg]
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
new file mode 100644
index 00000000..362f23c4
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -0,0 +1,17759 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+var PackageSymbols = map[string][]Symbol{
+	"archive/tar": {
+		{"(*Header).FileInfo", Method, 1, ""},
+		{"(*Reader).Next", Method, 0, ""},
+		{"(*Reader).Read", Method, 0, ""},
+		{"(*Writer).AddFS", Method, 22, ""},
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).Flush", Method, 0, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"(*Writer).WriteHeader", Method, 0, ""},
+		{"(Format).String", Method, 10, ""},
+		{"ErrFieldTooLong", Var, 0, ""},
+		{"ErrHeader", Var, 0, ""},
+		{"ErrInsecurePath", Var, 20, ""},
+		{"ErrWriteAfterClose", Var, 0, ""},
+		{"ErrWriteTooLong", Var, 0, ""},
+		{"FileInfoHeader", Func, 1, "func(fi fs.FileInfo, link string) (*Header, error)"},
+		{"FileInfoNames", Type, 23, ""},
+		{"Format", Type, 10, ""},
+		{"FormatGNU", Const, 10, ""},
+		{"FormatPAX", Const, 10, ""},
+		{"FormatUSTAR", Const, 10, ""},
+		{"FormatUnknown", Const, 10, ""},
+		{"Header", Type, 0, ""},
+		{"Header.AccessTime", Field, 0, ""},
+		{"Header.ChangeTime", Field, 0, ""},
+		{"Header.Devmajor", Field, 0, ""},
+		{"Header.Devminor", Field, 0, ""},
+		{"Header.Format", Field, 10, ""},
+		{"Header.Gid", Field, 0, ""},
+		{"Header.Gname", Field, 0, ""},
+		{"Header.Linkname", Field, 0, ""},
+		{"Header.ModTime", Field, 0, ""},
+		{"Header.Mode", Field, 0, ""},
+		{"Header.Name", Field, 0, ""},
+		{"Header.PAXRecords", Field, 10, ""},
+		{"Header.Size", Field, 0, ""},
+		{"Header.Typeflag", Field, 0, ""},
+		{"Header.Uid", Field, 0, ""},
+		{"Header.Uname", Field, 0, ""},
+		{"Header.Xattrs", Field, 3, ""},
+		{"NewReader", Func, 0, "func(r io.Reader) *Reader"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"Reader", Type, 0, ""},
+		{"TypeBlock", Const, 0, ""},
+		{"TypeChar", Const, 0, ""},
+		{"TypeCont", Const, 0, ""},
+		{"TypeDir", Const, 0, ""},
+		{"TypeFifo", Const, 0, ""},
+		{"TypeGNULongLink", Const, 1, ""},
+		{"TypeGNULongName", Const, 1, ""},
+		{"TypeGNUSparse", Const, 3, ""},
+		{"TypeLink", Const, 0, ""},
+		{"TypeReg", Const, 0, ""},
+		{"TypeRegA", Const, 0, ""},
+		{"TypeSymlink", Const, 0, ""},
+		{"TypeXGlobalHeader", Const, 0, ""},
+		{"TypeXHeader", Const, 0, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"archive/zip": {
+		{"(*File).DataOffset", Method, 2, ""},
+		{"(*File).FileInfo", Method, 0, ""},
+		{"(*File).ModTime", Method, 0, ""},
+		{"(*File).Mode", Method, 0, ""},
+		{"(*File).Open", Method, 0, ""},
+		{"(*File).OpenRaw", Method, 17, ""},
+		{"(*File).SetModTime", Method, 0, ""},
+		{"(*File).SetMode", Method, 0, ""},
+		{"(*FileHeader).FileInfo", Method, 0, ""},
+		{"(*FileHeader).ModTime", Method, 0, ""},
+		{"(*FileHeader).Mode", Method, 0, ""},
+		{"(*FileHeader).SetModTime", Method, 0, ""},
+		{"(*FileHeader).SetMode", Method, 0, ""},
+		{"(*ReadCloser).Close", Method, 0, ""},
+		{"(*ReadCloser).Open", Method, 16, ""},
+		{"(*ReadCloser).RegisterDecompressor", Method, 6, ""},
+		{"(*Reader).Open", Method, 16, ""},
+		{"(*Reader).RegisterDecompressor", Method, 6, ""},
+		{"(*Writer).AddFS", Method, 22, ""},
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).Copy", Method, 17, ""},
+		{"(*Writer).Create", Method, 0, ""},
+		{"(*Writer).CreateHeader", Method, 0, ""},
+		{"(*Writer).CreateRaw", Method, 17, ""},
+		{"(*Writer).Flush", Method, 4, ""},
+		{"(*Writer).RegisterCompressor", Method, 6, ""},
+		{"(*Writer).SetComment", Method, 10, ""},
+		{"(*Writer).SetOffset", Method, 5, ""},
+		{"Compressor", Type, 2, ""},
+		{"Decompressor", Type, 2, ""},
+		{"Deflate", Const, 0, ""},
+		{"ErrAlgorithm", Var, 0, ""},
+		{"ErrChecksum", Var, 0, ""},
+		{"ErrFormat", Var, 0, ""},
+		{"ErrInsecurePath", Var, 20, ""},
+		{"File", Type, 0, ""},
+		{"File.FileHeader", Field, 0, ""},
+		{"FileHeader", Type, 0, ""},
+		{"FileHeader.CRC32", Field, 0, ""},
+		{"FileHeader.Comment", Field, 0, ""},
+		{"FileHeader.CompressedSize", Field, 0, ""},
+		{"FileHeader.CompressedSize64", Field, 1, ""},
+		{"FileHeader.CreatorVersion", Field, 0, ""},
+		{"FileHeader.ExternalAttrs", Field, 0, ""},
+		{"FileHeader.Extra", Field, 0, ""},
+		{"FileHeader.Flags", Field, 0, ""},
+		{"FileHeader.Method", Field, 0, ""},
+		{"FileHeader.Modified", Field, 10, ""},
+		{"FileHeader.ModifiedDate", Field, 0, ""},
+		{"FileHeader.ModifiedTime", Field, 0, ""},
+		{"FileHeader.Name", Field, 0, ""},
+		{"FileHeader.NonUTF8", Field, 10, ""},
+		{"FileHeader.ReaderVersion", Field, 0, ""},
+		{"FileHeader.UncompressedSize", Field, 0, ""},
+		{"FileHeader.UncompressedSize64", Field, 1, ""},
+		{"FileInfoHeader", Func, 0, "func(fi fs.FileInfo) (*FileHeader, error)"},
+		{"NewReader", Func, 0, "func(r io.ReaderAt, size int64) (*Reader, error)"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"OpenReader", Func, 0, "func(name string) (*ReadCloser, error)"},
+		{"ReadCloser", Type, 0, ""},
+		{"ReadCloser.Reader", Field, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"Reader.Comment", Field, 0, ""},
+		{"Reader.File", Field, 0, ""},
+		{"RegisterCompressor", Func, 2, "func(method uint16, comp Compressor)"},
+		{"RegisterDecompressor", Func, 2, "func(method uint16, dcomp Decompressor)"},
+		{"Store", Const, 0, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"bufio": {
+		{"(*Reader).Buffered", Method, 0, ""},
+		{"(*Reader).Discard", Method, 5, ""},
+		{"(*Reader).Peek", Method, 0, ""},
+		{"(*Reader).Read", Method, 0, ""},
+		{"(*Reader).ReadByte", Method, 0, ""},
+		{"(*Reader).ReadBytes", Method, 0, ""},
+		{"(*Reader).ReadLine", Method, 0, ""},
+		{"(*Reader).ReadRune", Method, 0, ""},
+		{"(*Reader).ReadSlice", Method, 0, ""},
+		{"(*Reader).ReadString", Method, 0, ""},
+		{"(*Reader).Reset", Method, 2, ""},
+		{"(*Reader).Size", Method, 10, ""},
+		{"(*Reader).UnreadByte", Method, 0, ""},
+		{"(*Reader).UnreadRune", Method, 0, ""},
+		{"(*Reader).WriteTo", Method, 1, ""},
+		{"(*Scanner).Buffer", Method, 6, ""},
+		{"(*Scanner).Bytes", Method, 1, ""},
+		{"(*Scanner).Err", Method, 1, ""},
+		{"(*Scanner).Scan", Method, 1, ""},
+		{"(*Scanner).Split", Method, 1, ""},
+		{"(*Scanner).Text", Method, 1, ""},
+		{"(*Writer).Available", Method, 0, ""},
+		{"(*Writer).AvailableBuffer", Method, 18, ""},
+		{"(*Writer).Buffered", Method, 0, ""},
+		{"(*Writer).Flush", Method, 0, ""},
+		{"(*Writer).ReadFrom", Method, 1, ""},
+		{"(*Writer).Reset", Method, 2, ""},
+		{"(*Writer).Size", Method, 10, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"(*Writer).WriteByte", Method, 0, ""},
+		{"(*Writer).WriteRune", Method, 0, ""},
+		{"(*Writer).WriteString", Method, 0, ""},
+		{"(ReadWriter).Available", Method, 0, ""},
+		{"(ReadWriter).AvailableBuffer", Method, 18, ""},
+		{"(ReadWriter).Discard", Method, 5, ""},
+		{"(ReadWriter).Flush", Method, 0, ""},
+		{"(ReadWriter).Peek", Method, 0, ""},
+		{"(ReadWriter).Read", Method, 0, ""},
+		{"(ReadWriter).ReadByte", Method, 0, ""},
+		{"(ReadWriter).ReadBytes", Method, 0, ""},
+		{"(ReadWriter).ReadFrom", Method, 1, ""},
+		{"(ReadWriter).ReadLine", Method, 0, ""},
+		{"(ReadWriter).ReadRune", Method, 0, ""},
+		{"(ReadWriter).ReadSlice", Method, 0, ""},
+		{"(ReadWriter).ReadString", Method, 0, ""},
+		{"(ReadWriter).UnreadByte", Method, 0, ""},
+		{"(ReadWriter).UnreadRune", Method, 0, ""},
+		{"(ReadWriter).Write", Method, 0, ""},
+		{"(ReadWriter).WriteByte", Method, 0, ""},
+		{"(ReadWriter).WriteRune", Method, 0, ""},
+		{"(ReadWriter).WriteString", Method, 0, ""},
+		{"(ReadWriter).WriteTo", Method, 1, ""},
+		{"ErrAdvanceTooFar", Var, 1, ""},
+		{"ErrBadReadCount", Var, 15, ""},
+		{"ErrBufferFull", Var, 0, ""},
+		{"ErrFinalToken", Var, 6, ""},
+		{"ErrInvalidUnreadByte", Var, 0, ""},
+		{"ErrInvalidUnreadRune", Var, 0, ""},
+		{"ErrNegativeAdvance", Var, 1, ""},
+		{"ErrNegativeCount", Var, 0, ""},
+		{"ErrTooLong", Var, 1, ""},
+		{"MaxScanTokenSize", Const, 1, ""},
+		{"NewReadWriter", Func, 0, "func(r *Reader, w *Writer) *ReadWriter"},
+		{"NewReader", Func, 0, "func(rd io.Reader) *Reader"},
+		{"NewReaderSize", Func, 0, "func(rd io.Reader, size int) *Reader"},
+		{"NewScanner", Func, 1, "func(r io.Reader) *Scanner"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"NewWriterSize", Func, 0, "func(w io.Writer, size int) *Writer"},
+		{"ReadWriter", Type, 0, ""},
+		{"ReadWriter.Reader", Field, 0, ""},
+		{"ReadWriter.Writer", Field, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"ScanBytes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+		{"ScanLines", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+		{"ScanRunes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+		{"ScanWords", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+		{"Scanner", Type, 1, ""},
+		{"SplitFunc", Type, 1, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"bytes": {
+		{"(*Buffer).Available", Method, 21, ""},
+		{"(*Buffer).AvailableBuffer", Method, 21, ""},
+		{"(*Buffer).Bytes", Method, 0, ""},
+		{"(*Buffer).Cap", Method, 5, ""},
+		{"(*Buffer).Grow", Method, 1, ""},
+		{"(*Buffer).Len", Method, 0, ""},
+		{"(*Buffer).Next", Method, 0, ""},
+		{"(*Buffer).Peek", Method, 26, ""},
+		{"(*Buffer).Read", Method, 0, ""},
+		{"(*Buffer).ReadByte", Method, 0, ""},
+		{"(*Buffer).ReadBytes", Method, 0, ""},
+		{"(*Buffer).ReadFrom", Method, 0, ""},
+		{"(*Buffer).ReadRune", Method, 0, ""},
+		{"(*Buffer).ReadString", Method, 0, ""},
+		{"(*Buffer).Reset", Method, 0, ""},
+		{"(*Buffer).String", Method, 0, ""},
+		{"(*Buffer).Truncate", Method, 0, ""},
+		{"(*Buffer).UnreadByte", Method, 0, ""},
+		{"(*Buffer).UnreadRune", Method, 0, ""},
+		{"(*Buffer).Write", Method, 0, ""},
+		{"(*Buffer).WriteByte", Method, 0, ""},
+		{"(*Buffer).WriteRune", Method, 0, ""},
+		{"(*Buffer).WriteString", Method, 0, ""},
+		{"(*Buffer).WriteTo", Method, 0, ""},
+		{"(*Reader).Len", Method, 0, ""},
+		{"(*Reader).Read", Method, 0, ""},
+		{"(*Reader).ReadAt", Method, 0, ""},
+		{"(*Reader).ReadByte", Method, 0, ""},
+		{"(*Reader).ReadRune", Method, 0, ""},
+		{"(*Reader).Reset", Method, 7, ""},
+		{"(*Reader).Seek", Method, 0, ""},
+		{"(*Reader).Size", Method, 5, ""},
+		{"(*Reader).UnreadByte", Method, 0, ""},
+		{"(*Reader).UnreadRune", Method, 0, ""},
+		{"(*Reader).WriteTo", Method, 1, ""},
+		{"Buffer", Type, 0, ""},
+		{"Clone", Func, 20, "func(b []byte) []byte"},
+		{"Compare", Func, 0, "func(a []byte, b []byte) int"},
+		{"Contains", Func, 0, "func(b []byte, subslice []byte) bool"},
+		{"ContainsAny", Func, 7, "func(b []byte, chars string) bool"},
+		{"ContainsFunc", Func, 21, "func(b []byte, f func(rune) bool) bool"},
+		{"ContainsRune", Func, 7, "func(b []byte, r rune) bool"},
+		{"Count", Func, 0, "func(s []byte, sep []byte) int"},
+		{"Cut", Func, 18, "func(s []byte, sep []byte) (before []byte, after []byte, found bool)"},
+		{"CutPrefix", Func, 20, "func(s []byte, prefix []byte) (after []byte, found bool)"},
+		{"CutSuffix", Func, 20, "func(s []byte, suffix []byte) (before []byte, found bool)"},
+		{"Equal", Func, 0, "func(a []byte, b []byte) bool"},
+		{"EqualFold", Func, 0, "func(s []byte, t []byte) bool"},
+		{"ErrTooLarge", Var, 0, ""},
+		{"Fields", Func, 0, "func(s []byte) [][]byte"},
+		{"FieldsFunc", Func, 0, "func(s []byte, f func(rune) bool) [][]byte"},
+		{"FieldsFuncSeq", Func, 24, "func(s []byte, f func(rune) bool) iter.Seq[[]byte]"},
+		{"FieldsSeq", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
+		{"HasPrefix", Func, 0, "func(s []byte, prefix []byte) bool"},
+		{"HasSuffix", Func, 0, "func(s []byte, suffix []byte) bool"},
+		{"Index", Func, 0, "func(s []byte, sep []byte) int"},
+		{"IndexAny", Func, 0, "func(s []byte, chars string) int"},
+		{"IndexByte", Func, 0, "func(b []byte, c byte) int"},
+		{"IndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
+		{"IndexRune", Func, 0, "func(s []byte, r rune) int"},
+		{"Join", Func, 0, "func(s [][]byte, sep []byte) []byte"},
+		{"LastIndex", Func, 0, "func(s []byte, sep []byte) int"},
+		{"LastIndexAny", Func, 0, "func(s []byte, chars string) int"},
+		{"LastIndexByte", Func, 5, "func(s []byte, c byte) int"},
+		{"LastIndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
+		{"Lines", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
+		{"Map", Func, 0, "func(mapping func(r rune) rune, s []byte) []byte"},
+		{"MinRead", Const, 0, ""},
+		{"NewBuffer", Func, 0, "func(buf []byte) *Buffer"},
+		{"NewBufferString", Func, 0, "func(s string) *Buffer"},
+		{"NewReader", Func, 0, "func(b []byte) *Reader"},
+		{"Reader", Type, 0, ""},
+		{"Repeat", Func, 0, "func(b []byte, count int) []byte"},
+		{"Replace", Func, 0, "func(s []byte, old []byte, new []byte, n int) []byte"},
+		{"ReplaceAll", Func, 12, "func(s []byte, old []byte, new []byte) []byte"},
+		{"Runes", Func, 0, "func(s []byte) []rune"},
+		{"Split", Func, 0, "func(s []byte, sep []byte) [][]byte"},
+		{"SplitAfter", Func, 0, "func(s []byte, sep []byte) [][]byte"},
+		{"SplitAfterN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
+		{"SplitAfterSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
+		{"SplitN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
+		{"SplitSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
+		{"Title", Func, 0, "func(s []byte) []byte"},
+		{"ToLower", Func, 0, "func(s []byte) []byte"},
+		{"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+		{"ToTitle", Func, 0, "func(s []byte) []byte"},
+		{"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+		{"ToUpper", Func, 0, "func(s []byte) []byte"},
+		{"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+		{"ToValidUTF8", Func, 13, "func(s []byte, replacement []byte) []byte"},
+		{"Trim", Func, 0, "func(s []byte, cutset string) []byte"},
+		{"TrimFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+		{"TrimLeft", Func, 0, "func(s []byte, cutset string) []byte"},
+		{"TrimLeftFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+		{"TrimPrefix", Func, 1, "func(s []byte, prefix []byte) []byte"},
+		{"TrimRight", Func, 0, "func(s []byte, cutset string) []byte"},
+		{"TrimRightFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+		{"TrimSpace", Func, 0, "func(s []byte) []byte"},
+		{"TrimSuffix", Func, 1, "func(s []byte, suffix []byte) []byte"},
+	},
+	"cmp": {
+		{"Compare", Func, 21, "func[T Ordered](x T, y T) int"},
+		{"Less", Func, 21, "func[T Ordered](x T, y T) bool"},
+		{"Or", Func, 22, "func[T comparable](vals ...T) T"},
+		{"Ordered", Type, 21, ""},
+	},
+	"compress/bzip2": {
+		{"(StructuralError).Error", Method, 0, ""},
+		{"NewReader", Func, 0, "func(r io.Reader) io.Reader"},
+		{"StructuralError", Type, 0, ""},
+	},
+	"compress/flate": {
+		{"(*ReadError).Error", Method, 0, ""},
+		{"(*WriteError).Error", Method, 0, ""},
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).Flush", Method, 0, ""},
+		{"(*Writer).Reset", Method, 2, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"(CorruptInputError).Error", Method, 0, ""},
+		{"(InternalError).Error", Method, 0, ""},
+		{"BestCompression", Const, 0, ""},
+		{"BestSpeed", Const, 0, ""},
+		{"CorruptInputError", Type, 0, ""},
+		{"DefaultCompression", Const, 0, ""},
+		{"HuffmanOnly", Const, 7, ""},
+		{"InternalError", Type, 0, ""},
+		{"NewReader", Func, 0, "func(r io.Reader) io.ReadCloser"},
+		{"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) io.ReadCloser"},
+		{"NewWriter", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+		{"NewWriterDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
+		{"NoCompression", Const, 0, ""},
+		{"ReadError", Type, 0, ""},
+		{"ReadError.Err", Field, 0, ""},
+		{"ReadError.Offset", Field, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"Resetter", Type, 4, ""},
+		{"WriteError", Type, 0, ""},
+		{"WriteError.Err", Field, 0, ""},
+		{"WriteError.Offset", Field, 0, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"compress/gzip": {
+		{"(*Reader).Close", Method, 0, ""},
+		{"(*Reader).Multistream", Method, 4, ""},
+		{"(*Reader).Read", Method, 0, ""},
+		{"(*Reader).Reset", Method, 3, ""},
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).Flush", Method, 1, ""},
+		{"(*Writer).Reset", Method, 2, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"BestCompression", Const, 0, ""},
+		{"BestSpeed", Const, 0, ""},
+		{"DefaultCompression", Const, 0, ""},
+		{"ErrChecksum", Var, 0, ""},
+		{"ErrHeader", Var, 0, ""},
+		{"Header", Type, 0, ""},
+		{"Header.Comment", Field, 0, ""},
+		{"Header.Extra", Field, 0, ""},
+		{"Header.ModTime", Field, 0, ""},
+		{"Header.Name", Field, 0, ""},
+		{"Header.OS", Field, 0, ""},
+		{"HuffmanOnly", Const, 8, ""},
+		{"NewReader", Func, 0, "func(r io.Reader) (*Reader, error)"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+		{"NoCompression", Const, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"Reader.Header", Field, 0, ""},
+		{"Writer", Type, 0, ""},
+		{"Writer.Header", Field, 0, ""},
+	},
+	"compress/lzw": {
+		{"(*Reader).Close", Method, 17, ""},
+		{"(*Reader).Read", Method, 17, ""},
+		{"(*Reader).Reset", Method, 17, ""},
+		{"(*Writer).Close", Method, 17, ""},
+		{"(*Writer).Reset", Method, 17, ""},
+		{"(*Writer).Write", Method, 17, ""},
+		{"LSB", Const, 0, ""},
+		{"MSB", Const, 0, ""},
+		{"NewReader", Func, 0, "func(r io.Reader, order Order, litWidth int) io.ReadCloser"},
+		{"NewWriter", Func, 0, "func(w io.Writer, order Order, litWidth int) io.WriteCloser"},
+		{"Order", Type, 0, ""},
+		{"Reader", Type, 17, ""},
+		{"Writer", Type, 17, ""},
+	},
+	"compress/zlib": {
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).Flush", Method, 0, ""},
+		{"(*Writer).Reset", Method, 2, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"BestCompression", Const, 0, ""},
+		{"BestSpeed", Const, 0, ""},
+		{"DefaultCompression", Const, 0, ""},
+		{"ErrChecksum", Var, 0, ""},
+		{"ErrDictionary", Var, 0, ""},
+		{"ErrHeader", Var, 0, ""},
+		{"HuffmanOnly", Const, 8, ""},
+		{"NewReader", Func, 0, "func(r io.Reader) (io.ReadCloser, error)"},
+		{"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) (io.ReadCloser, error)"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+		{"NewWriterLevelDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
+		{"NoCompression", Const, 0, ""},
+		{"Resetter", Type, 4, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"container/heap": {
+		{"Fix", Func, 2, "func(h Interface, i int)"},
+		{"Init", Func, 0, "func(h Interface)"},
+		{"Interface", Type, 0, ""},
+		{"Pop", Func, 0, "func(h Interface) any"},
+		{"Push", Func, 0, "func(h Interface, x any)"},
+		{"Remove", Func, 0, "func(h Interface, i int) any"},
+	},
+	"container/list": {
+		{"(*Element).Next", Method, 0, ""},
+		{"(*Element).Prev", Method, 0, ""},
+		{"(*List).Back", Method, 0, ""},
+		{"(*List).Front", Method, 0, ""},
+		{"(*List).Init", Method, 0, ""},
+		{"(*List).InsertAfter", Method, 0, ""},
+		{"(*List).InsertBefore", Method, 0, ""},
+		{"(*List).Len", Method, 0, ""},
+		{"(*List).MoveAfter", Method, 2, ""},
+		{"(*List).MoveBefore", Method, 2, ""},
+		{"(*List).MoveToBack", Method, 0, ""},
+		{"(*List).MoveToFront", Method, 0, ""},
+		{"(*List).PushBack", Method, 0, ""},
+		{"(*List).PushBackList", Method, 0, ""},
+		{"(*List).PushFront", Method, 0, ""},
+		{"(*List).PushFrontList", Method, 0, ""},
+		{"(*List).Remove", Method, 0, ""},
+		{"Element", Type, 0, ""},
+		{"Element.Value", Field, 0, ""},
+		{"List", Type, 0, ""},
+		{"New", Func, 0, "func() *List"},
+	},
+	"container/ring": {
+		{"(*Ring).Do", Method, 0, ""},
+		{"(*Ring).Len", Method, 0, ""},
+		{"(*Ring).Link", Method, 0, ""},
+		{"(*Ring).Move", Method, 0, ""},
+		{"(*Ring).Next", Method, 0, ""},
+		{"(*Ring).Prev", Method, 0, ""},
+		{"(*Ring).Unlink", Method, 0, ""},
+		{"New", Func, 0, "func(n int) *Ring"},
+		{"Ring", Type, 0, ""},
+		{"Ring.Value", Field, 0, ""},
+	},
+	"context": {
+		{"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"},
+		{"Background", Func, 7, "func() Context"},
+		{"CancelCauseFunc", Type, 20, ""},
+		{"CancelFunc", Type, 7, ""},
+		{"Canceled", Var, 7, ""},
+		{"Cause", Func, 20, "func(c Context) error"},
+		{"Context", Type, 7, ""},
+		{"DeadlineExceeded", Var, 7, ""},
+		{"TODO", Func, 7, "func() Context"},
+		{"WithCancel", Func, 7, "func(parent Context) (ctx Context, cancel CancelFunc)"},
+		{"WithCancelCause", Func, 20, "func(parent Context) (ctx Context, cancel CancelCauseFunc)"},
+		{"WithDeadline", Func, 7, "func(parent Context, d time.Time) (Context, CancelFunc)"},
+		{"WithDeadlineCause", Func, 21, "func(parent Context, d time.Time, cause error) (Context, CancelFunc)"},
+		{"WithTimeout", Func, 7, "func(parent Context, timeout time.Duration) (Context, CancelFunc)"},
+		{"WithTimeoutCause", Func, 21, "func(parent Context, timeout time.Duration, cause error) (Context, CancelFunc)"},
+		{"WithValue", Func, 7, "func(parent Context, key any, val any) Context"},
+		{"WithoutCancel", Func, 21, "func(parent Context) Context"},
+	},
+	"crypto": {
+		{"(Hash).Available", Method, 0, ""},
+		{"(Hash).HashFunc", Method, 4, ""},
+		{"(Hash).New", Method, 0, ""},
+		{"(Hash).Size", Method, 0, ""},
+		{"(Hash).String", Method, 15, ""},
+		{"BLAKE2b_256", Const, 9, ""},
+		{"BLAKE2b_384", Const, 9, ""},
+		{"BLAKE2b_512", Const, 9, ""},
+		{"BLAKE2s_256", Const, 9, ""},
+		{"Decrypter", Type, 5, ""},
+		{"DecrypterOpts", Type, 5, ""},
+		{"Hash", Type, 0, ""},
+		{"MD4", Const, 0, ""},
+		{"MD5", Const, 0, ""},
+		{"MD5SHA1", Const, 0, ""},
+		{"MessageSigner", Type, 25, ""},
+		{"PrivateKey", Type, 0, ""},
+		{"PublicKey", Type, 2, ""},
+		{"RIPEMD160", Const, 0, ""},
+		{"RegisterHash", Func, 0, "func(h Hash, f func() hash.Hash)"},
+		{"SHA1", Const, 0, ""},
+		{"SHA224", Const, 0, ""},
+		{"SHA256", Const, 0, ""},
+		{"SHA384", Const, 0, ""},
+		{"SHA3_224", Const, 4, ""},
+		{"SHA3_256", Const, 4, ""},
+		{"SHA3_384", Const, 4, ""},
+		{"SHA3_512", Const, 4, ""},
+		{"SHA512", Const, 0, ""},
+		{"SHA512_224", Const, 5, ""},
+		{"SHA512_256", Const, 5, ""},
+		{"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"},
+		{"Signer", Type, 4, ""},
+		{"SignerOpts", Type, 4, ""},
+	},
+	"crypto/aes": {
+		{"(KeySizeError).Error", Method, 0, ""},
+		{"BlockSize", Const, 0, ""},
+		{"KeySizeError", Type, 0, ""},
+		{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+	},
+	"crypto/cipher": {
+		{"(StreamReader).Read", Method, 0, ""},
+		{"(StreamWriter).Close", Method, 0, ""},
+		{"(StreamWriter).Write", Method, 0, ""},
+		{"AEAD", Type, 2, ""},
+		{"Block", Type, 0, ""},
+		{"BlockMode", Type, 0, ""},
+		{"NewCBCDecrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
+		{"NewCBCEncrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
+		{"NewCFBDecrypter", Func, 0, "func(block Block, iv []byte) Stream"},
+		{"NewCFBEncrypter", Func, 0, "func(block Block, iv []byte) Stream"},
+		{"NewCTR", Func, 0, "func(block Block, iv []byte) Stream"},
+		{"NewGCM", Func, 2, "func(cipher Block) (AEAD, error)"},
+		{"NewGCMWithNonceSize", Func, 5, "func(cipher Block, size int) (AEAD, error)"},
+		{"NewGCMWithRandomNonce", Func, 24, "func(cipher Block) (AEAD, error)"},
+		{"NewGCMWithTagSize", Func, 11, "func(cipher Block, tagSize int) (AEAD, error)"},
+		{"NewOFB", Func, 0, "func(b Block, iv []byte) Stream"},
+		{"Stream", Type, 0, ""},
+		{"StreamReader", Type, 0, ""},
+		{"StreamReader.R", Field, 0, ""},
+		{"StreamReader.S", Field, 0, ""},
+		{"StreamWriter", Type, 0, ""},
+		{"StreamWriter.Err", Field, 0, ""},
+		{"StreamWriter.S", Field, 0, ""},
+		{"StreamWriter.W", Field, 0, ""},
+	},
+	"crypto/des": {
+		{"(KeySizeError).Error", Method, 0, ""},
+		{"BlockSize", Const, 0, ""},
+		{"KeySizeError", Type, 0, ""},
+		{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+		{"NewTripleDESCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+	},
+	"crypto/dsa": {
+		{"ErrInvalidPublicKey", Var, 0, ""},
+		{"GenerateKey", Func, 0, "func(priv *PrivateKey, rand io.Reader) error"},
+		{"GenerateParameters", Func, 0, "func(params *Parameters, rand io.Reader, sizes ParameterSizes) error"},
+		{"L1024N160", Const, 0, ""},
+		{"L2048N224", Const, 0, ""},
+		{"L2048N256", Const, 0, ""},
+		{"L3072N256", Const, 0, ""},
+		{"ParameterSizes", Type, 0, ""},
+		{"Parameters", Type, 0, ""},
+		{"Parameters.G", Field, 0, ""},
+		{"Parameters.P", Field, 0, ""},
+		{"Parameters.Q", Field, 0, ""},
+		{"PrivateKey", Type, 0, ""},
+		{"PrivateKey.PublicKey", Field, 0, ""},
+		{"PrivateKey.X", Field, 0, ""},
+		{"PublicKey", Type, 0, ""},
+		{"PublicKey.Parameters", Field, 0, ""},
+		{"PublicKey.Y", Field, 0, ""},
+		{"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
+		{"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
+	},
+	"crypto/ecdh": {
+		{"(*PrivateKey).Bytes", Method, 20, ""},
+		{"(*PrivateKey).Curve", Method, 20, ""},
+		{"(*PrivateKey).ECDH", Method, 20, ""},
+		{"(*PrivateKey).Equal", Method, 20, ""},
+		{"(*PrivateKey).Public", Method, 20, ""},
+		{"(*PrivateKey).PublicKey", Method, 20, ""},
+		{"(*PublicKey).Bytes", Method, 20, ""},
+		{"(*PublicKey).Curve", Method, 20, ""},
+		{"(*PublicKey).Equal", Method, 20, ""},
+		{"Curve", Type, 20, ""},
+		{"P256", Func, 20, "func() Curve"},
+		{"P384", Func, 20, "func() Curve"},
+		{"P521", Func, 20, "func() Curve"},
+		{"PrivateKey", Type, 20, ""},
+		{"PublicKey", Type, 20, ""},
+		{"X25519", Func, 20, "func() Curve"},
+	},
+	"crypto/ecdsa": {
+		{"(*PrivateKey).Bytes", Method, 25, ""},
+		{"(*PrivateKey).ECDH", Method, 20, ""},
+		{"(*PrivateKey).Equal", Method, 15, ""},
+		{"(*PrivateKey).Public", Method, 4, ""},
+		{"(*PrivateKey).Sign", Method, 4, ""},
+		{"(*PublicKey).Bytes", Method, 25, ""},
+		{"(*PublicKey).ECDH", Method, 20, ""},
+		{"(*PublicKey).Equal", Method, 15, ""},
+		{"(PrivateKey).Add", Method, 0, ""},
+		{"(PrivateKey).Double", Method, 0, ""},
+		{"(PrivateKey).IsOnCurve", Method, 0, ""},
+		{"(PrivateKey).Params", Method, 0, ""},
+		{"(PrivateKey).ScalarBaseMult", Method, 0, ""},
+		{"(PrivateKey).ScalarMult", Method, 0, ""},
+		{"(PublicKey).Add", Method, 0, ""},
+		{"(PublicKey).Double", Method, 0, ""},
+		{"(PublicKey).IsOnCurve", Method, 0, ""},
+		{"(PublicKey).Params", Method, 0, ""},
+		{"(PublicKey).ScalarBaseMult", Method, 0, ""},
+		{"(PublicKey).ScalarMult", Method, 0, ""},
+		{"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"},
+		{"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"},
+		{"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"},
+		{"PrivateKey", Type, 0, ""},
+		{"PrivateKey.D", Field, 0, ""},
+		{"PrivateKey.PublicKey", Field, 0, ""},
+		{"PublicKey", Type, 0, ""},
+		{"PublicKey.Curve", Field, 0, ""},
+		{"PublicKey.X", Field, 0, ""},
+		{"PublicKey.Y", Field, 0, ""},
+		{"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
+		{"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"},
+		{"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
+		{"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"},
+	},
+	"crypto/ed25519": {
+		{"(*Options).HashFunc", Method, 20, ""},
+		{"(PrivateKey).Equal", Method, 15, ""},
+		{"(PrivateKey).Public", Method, 13, ""},
+		{"(PrivateKey).Seed", Method, 13, ""},
+		{"(PrivateKey).Sign", Method, 13, ""},
+		{"(PublicKey).Equal", Method, 15, ""},
+		{"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"},
+		{"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"},
+		{"Options", Type, 20, ""},
+		{"Options.Context", Field, 20, ""},
+		{"Options.Hash", Field, 20, ""},
+		{"PrivateKey", Type, 13, ""},
+		{"PrivateKeySize", Const, 13, ""},
+		{"PublicKey", Type, 13, ""},
+		{"PublicKeySize", Const, 13, ""},
+		{"SeedSize", Const, 13, ""},
+		{"Sign", Func, 13, "func(privateKey PrivateKey, message []byte) []byte"},
+		{"SignatureSize", Const, 13, ""},
+		{"Verify", Func, 13, "func(publicKey PublicKey, message []byte, sig []byte) bool"},
+		{"VerifyWithOptions", Func, 20, "func(publicKey PublicKey, message []byte, sig []byte, opts *Options) error"},
+	},
+	"crypto/elliptic": {
+		{"(*CurveParams).Add", Method, 0, ""},
+		{"(*CurveParams).Double", Method, 0, ""},
+		{"(*CurveParams).IsOnCurve", Method, 0, ""},
+		{"(*CurveParams).Params", Method, 0, ""},
+		{"(*CurveParams).ScalarBaseMult", Method, 0, ""},
+		{"(*CurveParams).ScalarMult", Method, 0, ""},
+		{"Curve", Type, 0, ""},
+		{"CurveParams", Type, 0, ""},
+		{"CurveParams.B", Field, 0, ""},
+		{"CurveParams.BitSize", Field, 0, ""},
+		{"CurveParams.Gx", Field, 0, ""},
+		{"CurveParams.Gy", Field, 0, ""},
+		{"CurveParams.N", Field, 0, ""},
+		{"CurveParams.Name", Field, 5, ""},
+		{"CurveParams.P", Field, 0, ""},
+		{"GenerateKey", Func, 0, "func(curve Curve, rand io.Reader) (priv []byte, x *big.Int, y *big.Int, err error)"},
+		{"Marshal", Func, 0, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
+		{"MarshalCompressed", Func, 15, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
+		{"P224", Func, 0, "func() Curve"},
+		{"P256", Func, 0, "func() Curve"},
+		{"P384", Func, 0, "func() Curve"},
+		{"P521", Func, 0, "func() Curve"},
+		{"Unmarshal", Func, 0, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
+		{"UnmarshalCompressed", Func, 15, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
+	},
+	"crypto/fips140": {
+		{"Enabled", Func, 24, "func() bool"},
+	},
+	"crypto/hkdf": {
+		{"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"},
+		{"Extract", Func, 24, "func[H hash.Hash](h func() H, secret []byte, salt []byte) ([]byte, error)"},
+		{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, secret []byte, salt []byte, info string, keyLength int) ([]byte, error)"},
+	},
+	"crypto/hmac": {
+		{"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"},
+		{"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"},
+	},
+	"crypto/md5": {
+		{"BlockSize", Const, 0, ""},
+		{"New", Func, 0, "func() hash.Hash"},
+		{"Size", Const, 0, ""},
+		{"Sum", Func, 2, "func(data []byte) [16]byte"},
+	},
+	"crypto/mlkem": {
+		{"(*DecapsulationKey1024).Bytes", Method, 24, ""},
+		{"(*DecapsulationKey1024).Decapsulate", Method, 24, ""},
+		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""},
+		{"(*DecapsulationKey768).Bytes", Method, 24, ""},
+		{"(*DecapsulationKey768).Decapsulate", Method, 24, ""},
+		{"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""},
+		{"(*EncapsulationKey1024).Bytes", Method, 24, ""},
+		{"(*EncapsulationKey1024).Encapsulate", Method, 24, ""},
+		{"(*EncapsulationKey768).Bytes", Method, 24, ""},
+		{"(*EncapsulationKey768).Encapsulate", Method, 24, ""},
+		{"CiphertextSize1024", Const, 24, ""},
+		{"CiphertextSize768", Const, 24, ""},
+		{"DecapsulationKey1024", Type, 24, ""},
+		{"DecapsulationKey768", Type, 24, ""},
+		{"EncapsulationKey1024", Type, 24, ""},
+		{"EncapsulationKey768", Type, 24, ""},
+		{"EncapsulationKeySize1024", Const, 24, ""},
+		{"EncapsulationKeySize768", Const, 24, ""},
+		{"GenerateKey1024", Func, 24, "func() (*DecapsulationKey1024, error)"},
+		{"GenerateKey768", Func, 24, "func() (*DecapsulationKey768, error)"},
+		{"NewDecapsulationKey1024", Func, 24, "func(seed []byte) (*DecapsulationKey1024, error)"},
+		{"NewDecapsulationKey768", Func, 24, "func(seed []byte) (*DecapsulationKey768, error)"},
+		{"NewEncapsulationKey1024", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey1024, error)"},
+		{"NewEncapsulationKey768", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey768, error)"},
+		{"SeedSize", Const, 24, ""},
+		{"SharedKeySize", Const, 24, ""},
+	},
+	"crypto/pbkdf2": {
+		{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"},
+	},
+	"crypto/rand": {
+		{"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"},
+		{"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"},
+		{"Read", Func, 0, "func(b []byte) (n int, err error)"},
+		{"Reader", Var, 0, ""},
+		{"Text", Func, 24, "func() string"},
+	},
+	"crypto/rc4": {
+		{"(*Cipher).Reset", Method, 0, ""},
+		{"(*Cipher).XORKeyStream", Method, 0, ""},
+		{"(KeySizeError).Error", Method, 0, ""},
+		{"Cipher", Type, 0, ""},
+		{"KeySizeError", Type, 0, ""},
+		{"NewCipher", Func, 0, "func(key []byte) (*Cipher, error)"},
+	},
+	"crypto/rsa": {
+		{"(*PSSOptions).HashFunc", Method, 4, ""},
+		{"(*PrivateKey).Decrypt", Method, 5, ""},
+		{"(*PrivateKey).Equal", Method, 15, ""},
+		{"(*PrivateKey).Precompute", Method, 0, ""},
+		{"(*PrivateKey).Public", Method, 4, ""},
+		{"(*PrivateKey).Sign", Method, 4, ""},
+		{"(*PrivateKey).Size", Method, 11, ""},
+		{"(*PrivateKey).Validate", Method, 0, ""},
+		{"(*PublicKey).Equal", Method, 15, ""},
+		{"(*PublicKey).Size", Method, 11, ""},
+		{"CRTValue", Type, 0, ""},
+		{"CRTValue.Coeff", Field, 0, ""},
+		{"CRTValue.Exp", Field, 0, ""},
+		{"CRTValue.R", Field, 0, ""},
+		{"DecryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error)"},
+		{"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"},
+		{"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"},
+		{"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"},
+		{"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"},
+		{"ErrDecryption", Var, 0, ""},
+		{"ErrMessageTooLong", Var, 0, ""},
+		{"ErrVerification", Var, 0, ""},
+		{"GenerateKey", Func, 0, "func(random io.Reader, bits int) (*PrivateKey, error)"},
+		{"GenerateMultiPrimeKey", Func, 0, "func(random io.Reader, nprimes int, bits int) (*PrivateKey, error)"},
+		{"OAEPOptions", Type, 5, ""},
+		{"OAEPOptions.Hash", Field, 5, ""},
+		{"OAEPOptions.Label", Field, 5, ""},
+		{"OAEPOptions.MGFHash", Field, 20, ""},
+		{"PKCS1v15DecryptOptions", Type, 5, ""},
+		{"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5, ""},
+		{"PSSOptions", Type, 2, ""},
+		{"PSSOptions.Hash", Field, 4, ""},
+		{"PSSOptions.SaltLength", Field, 2, ""},
+		{"PSSSaltLengthAuto", Const, 2, ""},
+		{"PSSSaltLengthEqualsHash", Const, 2, ""},
+		{"PrecomputedValues", Type, 0, ""},
+		{"PrecomputedValues.CRTValues", Field, 0, ""},
+		{"PrecomputedValues.Dp", Field, 0, ""},
+		{"PrecomputedValues.Dq", Field, 0, ""},
+		{"PrecomputedValues.Qinv", Field, 0, ""},
+		{"PrivateKey", Type, 0, ""},
+		{"PrivateKey.D", Field, 0, ""},
+		{"PrivateKey.Precomputed", Field, 0, ""},
+		{"PrivateKey.Primes", Field, 0, ""},
+		{"PrivateKey.PublicKey", Field, 0, ""},
+		{"PublicKey", Type, 0, ""},
+		{"PublicKey.E", Field, 0, ""},
+		{"PublicKey.N", Field, 0, ""},
+		{"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"},
+		{"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"},
+		{"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"},
+		{"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"},
+	},
+	"crypto/sha1": {
+		{"BlockSize", Const, 0, ""},
+		{"New", Func, 0, "func() hash.Hash"},
+		{"Size", Const, 0, ""},
+		{"Sum", Func, 2, "func(data []byte) [20]byte"},
+	},
+	"crypto/sha256": {
+		{"BlockSize", Const, 0, ""},
+		{"New", Func, 0, "func() hash.Hash"},
+		{"New224", Func, 0, "func() hash.Hash"},
+		{"Size", Const, 0, ""},
+		{"Size224", Const, 0, ""},
+		{"Sum224", Func, 2, "func(data []byte) [28]byte"},
+		{"Sum256", Func, 2, "func(data []byte) [32]byte"},
+	},
+	"crypto/sha3": {
+		{"(*SHA3).AppendBinary", Method, 24, ""},
+		{"(*SHA3).BlockSize", Method, 24, ""},
+		{"(*SHA3).Clone", Method, 25, ""},
+		{"(*SHA3).MarshalBinary", Method, 24, ""},
+		{"(*SHA3).Reset", Method, 24, ""},
+		{"(*SHA3).Size", Method, 24, ""},
+		{"(*SHA3).Sum", Method, 24, ""},
+		{"(*SHA3).UnmarshalBinary", Method, 24, ""},
+		{"(*SHA3).Write", Method, 24, ""},
+		{"(*SHAKE).AppendBinary", Method, 24, ""},
+		{"(*SHAKE).BlockSize", Method, 24, ""},
+		{"(*SHAKE).MarshalBinary", Method, 24, ""},
+		{"(*SHAKE).Read", Method, 24, ""},
+		{"(*SHAKE).Reset", Method, 24, ""},
+		{"(*SHAKE).UnmarshalBinary", Method, 24, ""},
+		{"(*SHAKE).Write", Method, 24, ""},
+		{"New224", Func, 24, "func() *SHA3"},
+		{"New256", Func, 24, "func() *SHA3"},
+		{"New384", Func, 24, "func() *SHA3"},
+		{"New512", Func, 24, "func() *SHA3"},
+		{"NewCSHAKE128", Func, 24, "func(N []byte, S []byte) *SHAKE"},
+		{"NewCSHAKE256", Func, 24, "func(N []byte, S []byte) *SHAKE"},
+		{"NewSHAKE128", Func, 24, "func() *SHAKE"},
+		{"NewSHAKE256", Func, 24, "func() *SHAKE"},
+		{"SHA3", Type, 24, ""},
+		{"SHAKE", Type, 24, ""},
+		{"Sum224", Func, 24, "func(data []byte) [28]byte"},
+		{"Sum256", Func, 24, "func(data []byte) [32]byte"},
+		{"Sum384", Func, 24, "func(data []byte) [48]byte"},
+		{"Sum512", Func, 24, "func(data []byte) [64]byte"},
+		{"SumSHAKE128", Func, 24, "func(data []byte, length int) []byte"},
+		{"SumSHAKE256", Func, 24, "func(data []byte, length int) []byte"},
+	},
+	"crypto/sha512": {
+		{"BlockSize", Const, 0, ""},
+		{"New", Func, 0, "func() hash.Hash"},
+		{"New384", Func, 0, "func() hash.Hash"},
+		{"New512_224", Func, 5, "func() hash.Hash"},
+		{"New512_256", Func, 5, "func() hash.Hash"},
+		{"Size", Const, 0, ""},
+		{"Size224", Const, 5, ""},
+		{"Size256", Const, 5, ""},
+		{"Size384", Const, 0, ""},
+		{"Sum384", Func, 2, "func(data []byte) [48]byte"},
+		{"Sum512", Func, 2, "func(data []byte) [64]byte"},
+		{"Sum512_224", Func, 5, "func(data []byte) [28]byte"},
+		{"Sum512_256", Func, 5, "func(data []byte) [32]byte"},
+	},
+	"crypto/subtle": {
+		{"ConstantTimeByteEq", Func, 0, "func(x uint8, y uint8) int"},
+		{"ConstantTimeCompare", Func, 0, "func(x []byte, y []byte) int"},
+		{"ConstantTimeCopy", Func, 0, "func(v int, x []byte, y []byte)"},
+		{"ConstantTimeEq", Func, 0, "func(x int32, y int32) int"},
+		{"ConstantTimeLessOrEq", Func, 2, "func(x int, y int) int"},
+		{"ConstantTimeSelect", Func, 0, "func(v int, x int, y int) int"},
+		{"WithDataIndependentTiming", Func, 24, "func(f func())"},
+		{"XORBytes", Func, 20, "func(dst []byte, x []byte, y []byte) int"},
+	},
+	"crypto/tls": {
+		{"(*CertificateRequestInfo).Context", Method, 17, ""},
+		{"(*CertificateRequestInfo).SupportsCertificate", Method, 14, ""},
+		{"(*CertificateVerificationError).Error", Method, 20, ""},
+		{"(*CertificateVerificationError).Unwrap", Method, 20, ""},
+		{"(*ClientHelloInfo).Context", Method, 17, ""},
+		{"(*ClientHelloInfo).SupportsCertificate", Method, 14, ""},
+		{"(*ClientSessionState).ResumptionState", Method, 21, ""},
+		{"(*Config).BuildNameToCertificate", Method, 0, ""},
+		{"(*Config).Clone", Method, 8, ""},
+		{"(*Config).DecryptTicket", Method, 21, ""},
+		{"(*Config).EncryptTicket", Method, 21, ""},
+		{"(*Config).SetSessionTicketKeys", Method, 5, ""},
+		{"(*Conn).Close", Method, 0, ""},
+		{"(*Conn).CloseWrite", Method, 8, ""},
+		{"(*Conn).ConnectionState", Method, 0, ""},
+		{"(*Conn).Handshake", Method, 0, ""},
+		{"(*Conn).HandshakeContext", Method, 17, ""},
+		{"(*Conn).LocalAddr", Method, 0, ""},
+		{"(*Conn).NetConn", Method, 18, ""},
+		{"(*Conn).OCSPResponse", Method, 0, ""},
+		{"(*Conn).Read", Method, 0, ""},
+		{"(*Conn).RemoteAddr", Method, 0, ""},
+		{"(*Conn).SetDeadline", Method, 0, ""},
+		{"(*Conn).SetReadDeadline", Method, 0, ""},
+		{"(*Conn).SetWriteDeadline", Method, 0, ""},
+		{"(*Conn).VerifyHostname", Method, 0, ""},
+		{"(*Conn).Write", Method, 0, ""},
+		{"(*ConnectionState).ExportKeyingMaterial", Method, 11, ""},
+		{"(*Dialer).Dial", Method, 15, ""},
+		{"(*Dialer).DialContext", Method, 15, ""},
+		{"(*ECHRejectionError).Error", Method, 23, ""},
+		{"(*QUICConn).Close", Method, 21, ""},
+		{"(*QUICConn).ConnectionState", Method, 21, ""},
+		{"(*QUICConn).HandleData", Method, 21, ""},
+		{"(*QUICConn).NextEvent", Method, 21, ""},
+		{"(*QUICConn).SendSessionTicket", Method, 21, ""},
+		{"(*QUICConn).SetTransportParameters", Method, 21, ""},
+		{"(*QUICConn).Start", Method, 21, ""},
+		{"(*QUICConn).StoreSession", Method, 23, ""},
+		{"(*SessionState).Bytes", Method, 21, ""},
+		{"(AlertError).Error", Method, 21, ""},
+		{"(ClientAuthType).String", Method, 15, ""},
+		{"(CurveID).String", Method, 15, ""},
+		{"(QUICEncryptionLevel).String", Method, 21, ""},
+		{"(RecordHeaderError).Error", Method, 6, ""},
+		{"(SignatureScheme).String", Method, 15, ""},
+		{"AlertError", Type, 21, ""},
+		{"Certificate", Type, 0, ""},
+		{"Certificate.Certificate", Field, 0, ""},
+		{"Certificate.Leaf", Field, 0, ""},
+		{"Certificate.OCSPStaple", Field, 0, ""},
+		{"Certificate.PrivateKey", Field, 0, ""},
+		{"Certificate.SignedCertificateTimestamps", Field, 5, ""},
+		{"Certificate.SupportedSignatureAlgorithms", Field, 14, ""},
+		{"CertificateRequestInfo", Type, 8, ""},
+		{"CertificateRequestInfo.AcceptableCAs", Field, 8, ""},
+		{"CertificateRequestInfo.SignatureSchemes", Field, 8, ""},
+		{"CertificateRequestInfo.Version", Field, 14, ""},
+		{"CertificateVerificationError", Type, 20, ""},
+		{"CertificateVerificationError.Err", Field, 20, ""},
+		{"CertificateVerificationError.UnverifiedCertificates", Field, 20, ""},
+		{"CipherSuite", Type, 14, ""},
+		{"CipherSuite.ID", Field, 14, ""},
+		{"CipherSuite.Insecure", Field, 14, ""},
+		{"CipherSuite.Name", Field, 14, ""},
+		{"CipherSuite.SupportedVersions", Field, 14, ""},
+		{"CipherSuiteName", Func, 14, "func(id uint16) string"},
+		{"CipherSuites", Func, 14, "func() []*CipherSuite"},
+		{"Client", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
+		{"ClientAuthType", Type, 0, ""},
+		{"ClientHelloInfo", Type, 4, ""},
+		{"ClientHelloInfo.CipherSuites", Field, 4, ""},
+		{"ClientHelloInfo.Conn", Field, 8, ""},
+		{"ClientHelloInfo.Extensions", Field, 24, ""},
+		{"ClientHelloInfo.ServerName", Field, 4, ""},
+		{"ClientHelloInfo.SignatureSchemes", Field, 8, ""},
+		{"ClientHelloInfo.SupportedCurves", Field, 4, ""},
+		{"ClientHelloInfo.SupportedPoints", Field, 4, ""},
+		{"ClientHelloInfo.SupportedProtos", Field, 8, ""},
+		{"ClientHelloInfo.SupportedVersions", Field, 8, ""},
+		{"ClientSessionCache", Type, 3, ""},
+		{"ClientSessionState", Type, 3, ""},
+		{"Config", Type, 0, ""},
+		{"Config.Certificates", Field, 0, ""},
+		{"Config.CipherSuites", Field, 0, ""},
+		{"Config.ClientAuth", Field, 0, ""},
+		{"Config.ClientCAs", Field, 0, ""},
+		{"Config.ClientSessionCache", Field, 3, ""},
+		{"Config.CurvePreferences", Field, 3, ""},
+		{"Config.DynamicRecordSizingDisabled", Field, 7, ""},
+		{"Config.EncryptedClientHelloConfigList", Field, 23, ""},
+		{"Config.EncryptedClientHelloKeys", Field, 24, ""},
+		{"Config.EncryptedClientHelloRejectionVerify", Field, 23, ""},
+		{"Config.GetCertificate", Field, 4, ""},
+		{"Config.GetClientCertificate", Field, 8, ""},
+		{"Config.GetConfigForClient", Field, 8, ""},
+		{"Config.GetEncryptedClientHelloKeys", Field, 25, ""},
+		{"Config.InsecureSkipVerify", Field, 0, ""},
+		{"Config.KeyLogWriter", Field, 8, ""},
+		{"Config.MaxVersion", Field, 2, ""},
+		{"Config.MinVersion", Field, 2, ""},
+		{"Config.NameToCertificate", Field, 0, ""},
+		{"Config.NextProtos", Field, 0, ""},
+		{"Config.PreferServerCipherSuites", Field, 1, ""},
+		{"Config.Rand", Field, 0, ""},
+		{"Config.Renegotiation", Field, 7, ""},
+		{"Config.RootCAs", Field, 0, ""},
+		{"Config.ServerName", Field, 0, ""},
+		{"Config.SessionTicketKey", Field, 1, ""},
+		{"Config.SessionTicketsDisabled", Field, 1, ""},
+		{"Config.Time", Field, 0, ""},
+		{"Config.UnwrapSession", Field, 21, ""},
+		{"Config.VerifyConnection", Field, 15, ""},
+		{"Config.VerifyPeerCertificate", Field, 8, ""},
+		{"Config.WrapSession", Field, 21, ""},
+		{"Conn", Type, 0, ""},
+		{"ConnectionState", Type, 0, ""},
+		{"ConnectionState.CipherSuite", Field, 0, ""},
+		{"ConnectionState.CurveID", Field, 25, ""},
+		{"ConnectionState.DidResume", Field, 1, ""},
+		{"ConnectionState.ECHAccepted", Field, 23, ""},
+		{"ConnectionState.HandshakeComplete", Field, 0, ""},
+		{"ConnectionState.NegotiatedProtocol", Field, 0, ""},
+		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""},
+		{"ConnectionState.OCSPResponse", Field, 5, ""},
+		{"ConnectionState.PeerCertificates", Field, 0, ""},
+		{"ConnectionState.ServerName", Field, 0, ""},
+		{"ConnectionState.SignedCertificateTimestamps", Field, 5, ""},
+		{"ConnectionState.TLSUnique", Field, 4, ""},
+		{"ConnectionState.VerifiedChains", Field, 0, ""},
+		{"ConnectionState.Version", Field, 3, ""},
+		{"CurveID", Type, 3, ""},
+		{"CurveP256", Const, 3, ""},
+		{"CurveP384", Const, 3, ""},
+		{"CurveP521", Const, 3, ""},
+		{"Dial", Func, 0, "func(network string, addr string, config *Config) (*Conn, error)"},
+		{"DialWithDialer", Func, 3, "func(dialer *net.Dialer, network string, addr string, config *Config) (*Conn, error)"},
+		{"Dialer", Type, 15, ""},
+		{"Dialer.Config", Field, 15, ""},
+		{"Dialer.NetDialer", Field, 15, ""},
+		{"ECDSAWithP256AndSHA256", Const, 8, ""},
+		{"ECDSAWithP384AndSHA384", Const, 8, ""},
+		{"ECDSAWithP521AndSHA512", Const, 8, ""},
+		{"ECDSAWithSHA1", Const, 10, ""},
+		{"ECHRejectionError", Type, 23, ""},
+		{"ECHRejectionError.RetryConfigList", Field, 23, ""},
+		{"Ed25519", Const, 13, ""},
+		{"EncryptedClientHelloKey", Type, 24, ""},
+		{"EncryptedClientHelloKey.Config", Field, 24, ""},
+		{"EncryptedClientHelloKey.PrivateKey", Field, 24, ""},
+		{"EncryptedClientHelloKey.SendAsRetry", Field, 24, ""},
+		{"InsecureCipherSuites", Func, 14, "func() []*CipherSuite"},
+		{"Listen", Func, 0, "func(network string, laddr string, config *Config) (net.Listener, error)"},
+		{"LoadX509KeyPair", Func, 0, "func(certFile string, keyFile string) (Certificate, error)"},
+		{"NewLRUClientSessionCache", Func, 3, "func(capacity int) ClientSessionCache"},
+		{"NewListener", Func, 0, "func(inner net.Listener, config *Config) net.Listener"},
+		{"NewResumptionState", Func, 21, "func(ticket []byte, state *SessionState) (*ClientSessionState, error)"},
+		{"NoClientCert", Const, 0, ""},
+		{"PKCS1WithSHA1", Const, 8, ""},
+		{"PKCS1WithSHA256", Const, 8, ""},
+		{"PKCS1WithSHA384", Const, 8, ""},
+		{"PKCS1WithSHA512", Const, 8, ""},
+		{"PSSWithSHA256", Const, 8, ""},
+		{"PSSWithSHA384", Const, 8, ""},
+		{"PSSWithSHA512", Const, 8, ""},
+		{"ParseSessionState", Func, 21, "func(data []byte) (*SessionState, error)"},
+		{"QUICClient", Func, 21, "func(config *QUICConfig) *QUICConn"},
+		{"QUICConfig", Type, 21, ""},
+		{"QUICConfig.EnableSessionEvents", Field, 23, ""},
+		{"QUICConfig.TLSConfig", Field, 21, ""},
+		{"QUICConn", Type, 21, ""},
+		{"QUICEncryptionLevel", Type, 21, ""},
+		{"QUICEncryptionLevelApplication", Const, 21, ""},
+		{"QUICEncryptionLevelEarly", Const, 21, ""},
+		{"QUICEncryptionLevelHandshake", Const, 21, ""},
+		{"QUICEncryptionLevelInitial", Const, 21, ""},
+		{"QUICEvent", Type, 21, ""},
+		{"QUICEvent.Data", Field, 21, ""},
+		{"QUICEvent.Kind", Field, 21, ""},
+		{"QUICEvent.Level", Field, 21, ""},
+		{"QUICEvent.SessionState", Field, 23, ""},
+		{"QUICEvent.Suite", Field, 21, ""},
+		{"QUICEventKind", Type, 21, ""},
+		{"QUICHandshakeDone", Const, 21, ""},
+		{"QUICNoEvent", Const, 21, ""},
+		{"QUICRejectedEarlyData", Const, 21, ""},
+		{"QUICResumeSession", Const, 23, ""},
+		{"QUICServer", Func, 21, "func(config *QUICConfig) *QUICConn"},
+		{"QUICSessionTicketOptions", Type, 21, ""},
+		{"QUICSessionTicketOptions.EarlyData", Field, 21, ""},
+		{"QUICSessionTicketOptions.Extra", Field, 23, ""},
+		{"QUICSetReadSecret", Const, 21, ""},
+		{"QUICSetWriteSecret", Const, 21, ""},
+		{"QUICStoreSession", Const, 23, ""},
+		{"QUICTransportParameters", Const, 21, ""},
+		{"QUICTransportParametersRequired", Const, 21, ""},
+		{"QUICWriteData", Const, 21, ""},
+		{"RecordHeaderError", Type, 6, ""},
+		{"RecordHeaderError.Conn", Field, 12, ""},
+		{"RecordHeaderError.Msg", Field, 6, ""},
+		{"RecordHeaderError.RecordHeader", Field, 6, ""},
+		{"RenegotiateFreelyAsClient", Const, 7, ""},
+		{"RenegotiateNever", Const, 7, ""},
+		{"RenegotiateOnceAsClient", Const, 7, ""},
+		{"RenegotiationSupport", Type, 7, ""},
+		{"RequestClientCert", Const, 0, ""},
+		{"RequireAndVerifyClientCert", Const, 0, ""},
+		{"RequireAnyClientCert", Const, 0, ""},
+		{"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
+		{"SessionState", Type, 21, ""},
+		{"SessionState.EarlyData", Field, 21, ""},
+		{"SessionState.Extra", Field, 21, ""},
+		{"SignatureScheme", Type, 8, ""},
+		{"TLS_AES_128_GCM_SHA256", Const, 12, ""},
+		{"TLS_AES_256_GCM_SHA384", Const, 12, ""},
+		{"TLS_CHACHA20_POLY1305_SHA256", Const, 12, ""},
+		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2, ""},
+		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+		{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
+		{"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2, ""},
+		{"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
+		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
+		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
+		{"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2, ""},
+		{"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
+		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
+		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+		{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
+		{"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
+		{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
+		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
+		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
+		{"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0, ""},
+		{"TLS_FALLBACK_SCSV", Const, 4, ""},
+		{"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
+		{"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
+		{"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+		{"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6, ""},
+		{"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
+		{"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6, ""},
+		{"TLS_RSA_WITH_RC4_128_SHA", Const, 0, ""},
+		{"VerifyClientCertIfGiven", Const, 0, ""},
+		{"VersionName", Func, 21, "func(version uint16) string"},
+		{"VersionSSL30", Const, 2, ""},
+		{"VersionTLS10", Const, 2, ""},
+		{"VersionTLS11", Const, 2, ""},
+		{"VersionTLS12", Const, 2, ""},
+		{"VersionTLS13", Const, 12, ""},
+		{"X25519", Const, 8, ""},
+		{"X25519MLKEM768", Const, 24, ""},
+		{"X509KeyPair", Func, 0, "func(certPEMBlock []byte, keyPEMBlock []byte) (Certificate, error)"},
+	},
+	"crypto/x509": {
+		{"(*CertPool).AddCert", Method, 0, ""},
+		{"(*CertPool).AddCertWithConstraint", Method, 22, ""},
+		{"(*CertPool).AppendCertsFromPEM", Method, 0, ""},
+		{"(*CertPool).Clone", Method, 19, ""},
+		{"(*CertPool).Equal", Method, 19, ""},
+		{"(*CertPool).Subjects", Method, 0, ""},
+		{"(*Certificate).CheckCRLSignature", Method, 0, ""},
+		{"(*Certificate).CheckSignature", Method, 0, ""},
+		{"(*Certificate).CheckSignatureFrom", Method, 0, ""},
+		{"(*Certificate).CreateCRL", Method, 0, ""},
+		{"(*Certificate).Equal", Method, 0, ""},
+		{"(*Certificate).Verify", Method, 0, ""},
+		{"(*Certificate).VerifyHostname", Method, 0, ""},
+		{"(*CertificateRequest).CheckSignature", Method, 5, ""},
+		{"(*OID).UnmarshalBinary", Method, 23, ""},
+		{"(*OID).UnmarshalText", Method, 23, ""},
+		{"(*RevocationList).CheckSignatureFrom", Method, 19, ""},
+		{"(CertificateInvalidError).Error", Method, 0, ""},
+		{"(ConstraintViolationError).Error", Method, 0, ""},
+		{"(HostnameError).Error", Method, 0, ""},
+		{"(InsecureAlgorithmError).Error", Method, 6, ""},
+		{"(OID).AppendBinary", Method, 24, ""},
+		{"(OID).AppendText", Method, 24, ""},
+		{"(OID).Equal", Method, 22, ""},
+		{"(OID).EqualASN1OID", Method, 22, ""},
+		{"(OID).MarshalBinary", Method, 23, ""},
+		{"(OID).MarshalText", Method, 23, ""},
+		{"(OID).String", Method, 22, ""},
+		{"(PublicKeyAlgorithm).String", Method, 10, ""},
+		{"(SignatureAlgorithm).String", Method, 6, ""},
+		{"(SystemRootsError).Error", Method, 1, ""},
+		{"(SystemRootsError).Unwrap", Method, 16, ""},
+		{"(UnhandledCriticalExtension).Error", Method, 0, ""},
+		{"(UnknownAuthorityError).Error", Method, 0, ""},
+		{"CANotAuthorizedForExtKeyUsage", Const, 10, ""},
+		{"CANotAuthorizedForThisName", Const, 0, ""},
+		{"CertPool", Type, 0, ""},
+		{"Certificate", Type, 0, ""},
+		{"Certificate.AuthorityKeyId", Field, 0, ""},
+		{"Certificate.BasicConstraintsValid", Field, 0, ""},
+		{"Certificate.CRLDistributionPoints", Field, 2, ""},
+		{"Certificate.DNSNames", Field, 0, ""},
+		{"Certificate.EmailAddresses", Field, 0, ""},
+		{"Certificate.ExcludedDNSDomains", Field, 9, ""},
+		{"Certificate.ExcludedEmailAddresses", Field, 10, ""},
+		{"Certificate.ExcludedIPRanges", Field, 10, ""},
+		{"Certificate.ExcludedURIDomains", Field, 10, ""},
+		{"Certificate.ExtKeyUsage", Field, 0, ""},
+		{"Certificate.Extensions", Field, 2, ""},
+		{"Certificate.ExtraExtensions", Field, 2, ""},
+		{"Certificate.IPAddresses", Field, 1, ""},
+		{"Certificate.InhibitAnyPolicy", Field, 24, ""},
+		{"Certificate.InhibitAnyPolicyZero", Field, 24, ""},
+		{"Certificate.InhibitPolicyMapping", Field, 24, ""},
+		{"Certificate.InhibitPolicyMappingZero", Field, 24, ""},
+		{"Certificate.IsCA", Field, 0, ""},
+		{"Certificate.Issuer", Field, 0, ""},
+		{"Certificate.IssuingCertificateURL", Field, 2, ""},
+		{"Certificate.KeyUsage", Field, 0, ""},
+		{"Certificate.MaxPathLen", Field, 0, ""},
+		{"Certificate.MaxPathLenZero", Field, 4, ""},
+		{"Certificate.NotAfter", Field, 0, ""},
+		{"Certificate.NotBefore", Field, 0, ""},
+		{"Certificate.OCSPServer", Field, 2, ""},
+		{"Certificate.PermittedDNSDomains", Field, 0, ""},
+		{"Certificate.PermittedDNSDomainsCritical", Field, 0, ""},
+		{"Certificate.PermittedEmailAddresses", Field, 10, ""},
+		{"Certificate.PermittedIPRanges", Field, 10, ""},
+		{"Certificate.PermittedURIDomains", Field, 10, ""},
+		{"Certificate.Policies", Field, 22, ""},
+		{"Certificate.PolicyIdentifiers", Field, 0, ""},
+		{"Certificate.PolicyMappings", Field, 24, ""},
+		{"Certificate.PublicKey", Field, 0, ""},
+		{"Certificate.PublicKeyAlgorithm", Field, 0, ""},
+		{"Certificate.Raw", Field, 0, ""},
+		{"Certificate.RawIssuer", Field, 0, ""},
+		{"Certificate.RawSubject", Field, 0, ""},
+		{"Certificate.RawSubjectPublicKeyInfo", Field, 0, ""},
+		{"Certificate.RawTBSCertificate", Field, 0, ""},
+		{"Certificate.RequireExplicitPolicy", Field, 24, ""},
+		{"Certificate.RequireExplicitPolicyZero", Field, 24, ""},
+		{"Certificate.SerialNumber", Field, 0, ""},
+		{"Certificate.Signature", Field, 0, ""},
+		{"Certificate.SignatureAlgorithm", Field, 0, ""},
+		{"Certificate.Subject", Field, 0, ""},
+		{"Certificate.SubjectKeyId", Field, 0, ""},
+		{"Certificate.URIs", Field, 10, ""},
+		{"Certificate.UnhandledCriticalExtensions", Field, 5, ""},
+		{"Certificate.UnknownExtKeyUsage", Field, 0, ""},
+		{"Certificate.Version", Field, 0, ""},
+		{"CertificateInvalidError", Type, 0, ""},
+		{"CertificateInvalidError.Cert", Field, 0, ""},
+		{"CertificateInvalidError.Detail", Field, 10, ""},
+		{"CertificateInvalidError.Reason", Field, 0, ""},
+		{"CertificateRequest", Type, 3, ""},
+		{"CertificateRequest.Attributes", Field, 3, ""},
+		{"CertificateRequest.DNSNames", Field, 3, ""},
+		{"CertificateRequest.EmailAddresses", Field, 3, ""},
+		{"CertificateRequest.Extensions", Field, 3, ""},
+		{"CertificateRequest.ExtraExtensions", Field, 3, ""},
+		{"CertificateRequest.IPAddresses", Field, 3, ""},
+		{"CertificateRequest.PublicKey", Field, 3, ""},
+		{"CertificateRequest.PublicKeyAlgorithm", Field, 3, ""},
+		{"CertificateRequest.Raw", Field, 3, ""},
+		{"CertificateRequest.RawSubject", Field, 3, ""},
+		{"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3, ""},
+		{"CertificateRequest.RawTBSCertificateRequest", Field, 3, ""},
+		{"CertificateRequest.Signature", Field, 3, ""},
+		{"CertificateRequest.SignatureAlgorithm", Field, 3, ""},
+		{"CertificateRequest.Subject", Field, 3, ""},
+		{"CertificateRequest.URIs", Field, 10, ""},
+		{"CertificateRequest.Version", Field, 3, ""},
+		{"ConstraintViolationError", Type, 0, ""},
+		{"CreateCertificate", Func, 0, "func(rand io.Reader, template *Certificate, parent *Certificate, pub any, priv any) ([]byte, error)"},
+		{"CreateCertificateRequest", Func, 3, "func(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error)"},
+		{"CreateRevocationList", Func, 15, "func(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error)"},
+		{"DSA", Const, 0, ""},
+		{"DSAWithSHA1", Const, 0, ""},
+		{"DSAWithSHA256", Const, 0, ""},
+		{"DecryptPEMBlock", Func, 1, "func(b *pem.Block, password []byte) ([]byte, error)"},
+		{"ECDSA", Const, 1, ""},
+		{"ECDSAWithSHA1", Const, 1, ""},
+		{"ECDSAWithSHA256", Const, 1, ""},
+		{"ECDSAWithSHA384", Const, 1, ""},
+		{"ECDSAWithSHA512", Const, 1, ""},
+		{"Ed25519", Const, 13, ""},
+		{"EncryptPEMBlock", Func, 1, "func(rand io.Reader, blockType string, data []byte, password []byte, alg PEMCipher) (*pem.Block, error)"},
+		{"ErrUnsupportedAlgorithm", Var, 0, ""},
+		{"Expired", Const, 0, ""},
+		{"ExtKeyUsage", Type, 0, ""},
+		{"ExtKeyUsageAny", Const, 0, ""},
+		{"ExtKeyUsageClientAuth", Const, 0, ""},
+		{"ExtKeyUsageCodeSigning", Const, 0, ""},
+		{"ExtKeyUsageEmailProtection", Const, 0, ""},
+		{"ExtKeyUsageIPSECEndSystem", Const, 1, ""},
+		{"ExtKeyUsageIPSECTunnel", Const, 1, ""},
+		{"ExtKeyUsageIPSECUser", Const, 1, ""},
+		{"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10, ""},
+		{"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10, ""},
+		{"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1, ""},
+		{"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1, ""},
+		{"ExtKeyUsageOCSPSigning", Const, 0, ""},
+		{"ExtKeyUsageServerAuth", Const, 0, ""},
+		{"ExtKeyUsageTimeStamping", Const, 0, ""},
+		{"HostnameError", Type, 0, ""},
+		{"HostnameError.Certificate", Field, 0, ""},
+		{"HostnameError.Host", Field, 0, ""},
+		{"IncompatibleUsage", Const, 1, ""},
+		{"IncorrectPasswordError", Var, 1, ""},
+		{"InsecureAlgorithmError", Type, 6, ""},
+		{"InvalidReason", Type, 0, ""},
+		{"IsEncryptedPEMBlock", Func, 1, "func(b *pem.Block) bool"},
+		{"KeyUsage", Type, 0, ""},
+		{"KeyUsageCRLSign", Const, 0, ""},
+		{"KeyUsageCertSign", Const, 0, ""},
+		{"KeyUsageContentCommitment", Const, 0, ""},
+		{"KeyUsageDataEncipherment", Const, 0, ""},
+		{"KeyUsageDecipherOnly", Const, 0, ""},
+		{"KeyUsageDigitalSignature", Const, 0, ""},
+		{"KeyUsageEncipherOnly", Const, 0, ""},
+		{"KeyUsageKeyAgreement", Const, 0, ""},
+		{"KeyUsageKeyEncipherment", Const, 0, ""},
+		{"MD2WithRSA", Const, 0, ""},
+		{"MD5WithRSA", Const, 0, ""},
+		{"MarshalECPrivateKey", Func, 2, "func(key *ecdsa.PrivateKey) ([]byte, error)"},
+		{"MarshalPKCS1PrivateKey", Func, 0, "func(key *rsa.PrivateKey) []byte"},
+		{"MarshalPKCS1PublicKey", Func, 10, "func(key *rsa.PublicKey) []byte"},
+		{"MarshalPKCS8PrivateKey", Func, 10, "func(key any) ([]byte, error)"},
+		{"MarshalPKIXPublicKey", Func, 0, "func(pub any) ([]byte, error)"},
+		{"NameConstraintsWithoutSANs", Const, 10, ""},
+		{"NameMismatch", Const, 8, ""},
+		{"NewCertPool", Func, 0, "func() *CertPool"},
+		{"NoValidChains", Const, 24, ""},
+		{"NotAuthorizedToSign", Const, 0, ""},
+		{"OID", Type, 22, ""},
+		{"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"},
+		{"PEMCipher", Type, 1, ""},
+		{"PEMCipher3DES", Const, 1, ""},
+		{"PEMCipherAES128", Const, 1, ""},
+		{"PEMCipherAES192", Const, 1, ""},
+		{"PEMCipherAES256", Const, 1, ""},
+		{"PEMCipherDES", Const, 1, ""},
+		{"ParseCRL", Func, 0, "func(crlBytes []byte) (*pkix.CertificateList, error)"},
+		{"ParseCertificate", Func, 0, "func(der []byte) (*Certificate, error)"},
+		{"ParseCertificateRequest", Func, 3, "func(asn1Data []byte) (*CertificateRequest, error)"},
+		{"ParseCertificates", Func, 0, "func(der []byte) ([]*Certificate, error)"},
+		{"ParseDERCRL", Func, 0, "func(derBytes []byte) (*pkix.CertificateList, error)"},
+		{"ParseECPrivateKey", Func, 1, "func(der []byte) (*ecdsa.PrivateKey, error)"},
+		{"ParseOID", Func, 23, "func(oid string) (OID, error)"},
+		{"ParsePKCS1PrivateKey", Func, 0, "func(der []byte) (*rsa.PrivateKey, error)"},
+		{"ParsePKCS1PublicKey", Func, 10, "func(der []byte) (*rsa.PublicKey, error)"},
+		{"ParsePKCS8PrivateKey", Func, 0, "func(der []byte) (key any, err error)"},
+		{"ParsePKIXPublicKey", Func, 0, "func(derBytes []byte) (pub any, err error)"},
+		{"ParseRevocationList", Func, 19, "func(der []byte) (*RevocationList, error)"},
+		{"PolicyMapping", Type, 24, ""},
+		{"PolicyMapping.IssuerDomainPolicy", Field, 24, ""},
+		{"PolicyMapping.SubjectDomainPolicy", Field, 24, ""},
+		{"PublicKeyAlgorithm", Type, 0, ""},
+		{"PureEd25519", Const, 13, ""},
+		{"RSA", Const, 0, ""},
+		{"RevocationList", Type, 15, ""},
+		{"RevocationList.AuthorityKeyId", Field, 19, ""},
+		{"RevocationList.Extensions", Field, 19, ""},
+		{"RevocationList.ExtraExtensions", Field, 15, ""},
+		{"RevocationList.Issuer", Field, 19, ""},
+		{"RevocationList.NextUpdate", Field, 15, ""},
+		{"RevocationList.Number", Field, 15, ""},
+		{"RevocationList.Raw", Field, 19, ""},
+		{"RevocationList.RawIssuer", Field, 19, ""},
+		{"RevocationList.RawTBSRevocationList", Field, 19, ""},
+		{"RevocationList.RevokedCertificateEntries", Field, 21, ""},
+		{"RevocationList.RevokedCertificates", Field, 15, ""},
+		{"RevocationList.Signature", Field, 19, ""},
+		{"RevocationList.SignatureAlgorithm", Field, 15, ""},
+		{"RevocationList.ThisUpdate", Field, 15, ""},
+		{"RevocationListEntry", Type, 21, ""},
+		{"RevocationListEntry.Extensions", Field, 21, ""},
+		{"RevocationListEntry.ExtraExtensions", Field, 21, ""},
+		{"RevocationListEntry.Raw", Field, 21, ""},
+		{"RevocationListEntry.ReasonCode", Field, 21, ""},
+		{"RevocationListEntry.RevocationTime", Field, 21, ""},
+		{"RevocationListEntry.SerialNumber", Field, 21, ""},
+		{"SHA1WithRSA", Const, 0, ""},
+		{"SHA256WithRSA", Const, 0, ""},
+		{"SHA256WithRSAPSS", Const, 8, ""},
+		{"SHA384WithRSA", Const, 0, ""},
+		{"SHA384WithRSAPSS", Const, 8, ""},
+		{"SHA512WithRSA", Const, 0, ""},
+		{"SHA512WithRSAPSS", Const, 8, ""},
+		{"SetFallbackRoots", Func, 20, "func(roots *CertPool)"},
+		{"SignatureAlgorithm", Type, 0, ""},
+		{"SystemCertPool", Func, 7, "func() (*CertPool, error)"},
+		{"SystemRootsError", Type, 1, ""},
+		{"SystemRootsError.Err", Field, 7, ""},
+		{"TooManyConstraints", Const, 10, ""},
+		{"TooManyIntermediates", Const, 0, ""},
+		{"UnconstrainedName", Const, 10, ""},
+		{"UnhandledCriticalExtension", Type, 0, ""},
+		{"UnknownAuthorityError", Type, 0, ""},
+		{"UnknownAuthorityError.Cert", Field, 8, ""},
+		{"UnknownPublicKeyAlgorithm", Const, 0, ""},
+		{"UnknownSignatureAlgorithm", Const, 0, ""},
+		{"VerifyOptions", Type, 0, ""},
+		{"VerifyOptions.CertificatePolicies", Field, 24, ""},
+		{"VerifyOptions.CurrentTime", Field, 0, ""},
+		{"VerifyOptions.DNSName", Field, 0, ""},
+		{"VerifyOptions.Intermediates", Field, 0, ""},
+		{"VerifyOptions.KeyUsages", Field, 1, ""},
+		{"VerifyOptions.MaxConstraintComparisions", Field, 10, ""},
+		{"VerifyOptions.Roots", Field, 0, ""},
+	},
+	"crypto/x509/pkix": {
+		{"(*CertificateList).HasExpired", Method, 0, ""},
+		{"(*Name).FillFromRDNSequence", Method, 0, ""},
+		{"(Name).String", Method, 10, ""},
+		{"(Name).ToRDNSequence", Method, 0, ""},
+		{"(RDNSequence).String", Method, 10, ""},
+		{"AlgorithmIdentifier", Type, 0, ""},
+		{"AlgorithmIdentifier.Algorithm", Field, 0, ""},
+		{"AlgorithmIdentifier.Parameters", Field, 0, ""},
+		{"AttributeTypeAndValue", Type, 0, ""},
+		{"AttributeTypeAndValue.Type", Field, 0, ""},
+		{"AttributeTypeAndValue.Value", Field, 0, ""},
+		{"AttributeTypeAndValueSET", Type, 3, ""},
+		{"AttributeTypeAndValueSET.Type", Field, 3, ""},
+		{"AttributeTypeAndValueSET.Value", Field, 3, ""},
+		{"CertificateList", Type, 0, ""},
+		{"CertificateList.SignatureAlgorithm", Field, 0, ""},
+		{"CertificateList.SignatureValue", Field, 0, ""},
+		{"CertificateList.TBSCertList", Field, 0, ""},
+		{"Extension", Type, 0, ""},
+		{"Extension.Critical", Field, 0, ""},
+		{"Extension.Id", Field, 0, ""},
+		{"Extension.Value", Field, 0, ""},
+		{"Name", Type, 0, ""},
+		{"Name.CommonName", Field, 0, ""},
+		{"Name.Country", Field, 0, ""},
+		{"Name.ExtraNames", Field, 5, ""},
+		{"Name.Locality", Field, 0, ""},
+		{"Name.Names", Field, 0, ""},
+		{"Name.Organization", Field, 0, ""},
+		{"Name.OrganizationalUnit", Field, 0, ""},
+		{"Name.PostalCode", Field, 0, ""},
+		{"Name.Province", Field, 0, ""},
+		{"Name.SerialNumber", Field, 0, ""},
+		{"Name.StreetAddress", Field, 0, ""},
+		{"RDNSequence", Type, 0, ""},
+		{"RelativeDistinguishedNameSET", Type, 0, ""},
+		{"RevokedCertificate", Type, 0, ""},
+		{"RevokedCertificate.Extensions", Field, 0, ""},
+		{"RevokedCertificate.RevocationTime", Field, 0, ""},
+		{"RevokedCertificate.SerialNumber", Field, 0, ""},
+		{"TBSCertificateList", Type, 0, ""},
+		{"TBSCertificateList.Extensions", Field, 0, ""},
+		{"TBSCertificateList.Issuer", Field, 0, ""},
+		{"TBSCertificateList.NextUpdate", Field, 0, ""},
+		{"TBSCertificateList.Raw", Field, 0, ""},
+		{"TBSCertificateList.RevokedCertificates", Field, 0, ""},
+		{"TBSCertificateList.Signature", Field, 0, ""},
+		{"TBSCertificateList.ThisUpdate", Field, 0, ""},
+		{"TBSCertificateList.Version", Field, 0, ""},
+	},
+	"database/sql": {
+		{"(*ColumnType).DatabaseTypeName", Method, 8, ""},
+		{"(*ColumnType).DecimalSize", Method, 8, ""},
+		{"(*ColumnType).Length", Method, 8, ""},
+		{"(*ColumnType).Name", Method, 8, ""},
+		{"(*ColumnType).Nullable", Method, 8, ""},
+		{"(*ColumnType).ScanType", Method, 8, ""},
+		{"(*Conn).BeginTx", Method, 9, ""},
+		{"(*Conn).Close", Method, 9, ""},
+		{"(*Conn).ExecContext", Method, 9, ""},
+		{"(*Conn).PingContext", Method, 9, ""},
+		{"(*Conn).PrepareContext", Method, 9, ""},
+		{"(*Conn).QueryContext", Method, 9, ""},
+		{"(*Conn).QueryRowContext", Method, 9, ""},
+		{"(*Conn).Raw", Method, 13, ""},
+		{"(*DB).Begin", Method, 0, ""},
+		{"(*DB).BeginTx", Method, 8, ""},
+		{"(*DB).Close", Method, 0, ""},
+		{"(*DB).Conn", Method, 9, ""},
+		{"(*DB).Driver", Method, 0, ""},
+		{"(*DB).Exec", Method, 0, ""},
+		{"(*DB).ExecContext", Method, 8, ""},
+		{"(*DB).Ping", Method, 1, ""},
+		{"(*DB).PingContext", Method, 8, ""},
+		{"(*DB).Prepare", Method, 0, ""},
+		{"(*DB).PrepareContext", Method, 8, ""},
+		{"(*DB).Query", Method, 0, ""},
+		{"(*DB).QueryContext", Method, 8, ""},
+		{"(*DB).QueryRow", Method, 0, ""},
+		{"(*DB).QueryRowContext", Method, 8, ""},
+		{"(*DB).SetConnMaxIdleTime", Method, 15, ""},
+		{"(*DB).SetConnMaxLifetime", Method, 6, ""},
+		{"(*DB).SetMaxIdleConns", Method, 1, ""},
+		{"(*DB).SetMaxOpenConns", Method, 2, ""},
+		{"(*DB).Stats", Method, 5, ""},
+		{"(*Null).Scan", Method, 22, ""},
+		{"(*NullBool).Scan", Method, 0, ""},
+		{"(*NullByte).Scan", Method, 17, ""},
+		{"(*NullFloat64).Scan", Method, 0, ""},
+		{"(*NullInt16).Scan", Method, 17, ""},
+		{"(*NullInt32).Scan", Method, 13, ""},
+		{"(*NullInt64).Scan", Method, 0, ""},
+		{"(*NullString).Scan", Method, 0, ""},
+		{"(*NullTime).Scan", Method, 13, ""},
+		{"(*Row).Err", Method, 15, ""},
+		{"(*Row).Scan", Method, 0, ""},
+		{"(*Rows).Close", Method, 0, ""},
+		{"(*Rows).ColumnTypes", Method, 8, ""},
+		{"(*Rows).Columns", Method, 0, ""},
+		{"(*Rows).Err", Method, 0, ""},
+		{"(*Rows).Next", Method, 0, ""},
+		{"(*Rows).NextResultSet", Method, 8, ""},
+		{"(*Rows).Scan", Method, 0, ""},
+		{"(*Stmt).Close", Method, 0, ""},
+		{"(*Stmt).Exec", Method, 0, ""},
+		{"(*Stmt).ExecContext", Method, 8, ""},
+		{"(*Stmt).Query", Method, 0, ""},
+		{"(*Stmt).QueryContext", Method, 8, ""},
+		{"(*Stmt).QueryRow", Method, 0, ""},
+		{"(*Stmt).QueryRowContext", Method, 8, ""},
+		{"(*Tx).Commit", Method, 0, ""},
+		{"(*Tx).Exec", Method, 0, ""},
+		{"(*Tx).ExecContext", Method, 8, ""},
+		{"(*Tx).Prepare", Method, 0, ""},
+		{"(*Tx).PrepareContext", Method, 8, ""},
+		{"(*Tx).Query", Method, 0, ""},
+		{"(*Tx).QueryContext", Method, 8, ""},
+		{"(*Tx).QueryRow", Method, 0, ""},
+		{"(*Tx).QueryRowContext", Method, 8, ""},
+		{"(*Tx).Rollback", Method, 0, ""},
+		{"(*Tx).Stmt", Method, 0, ""},
+		{"(*Tx).StmtContext", Method, 8, ""},
+		{"(IsolationLevel).String", Method, 11, ""},
+		{"(Null).Value", Method, 22, ""},
+		{"(NullBool).Value", Method, 0, ""},
+		{"(NullByte).Value", Method, 17, ""},
+		{"(NullFloat64).Value", Method, 0, ""},
+		{"(NullInt16).Value", Method, 17, ""},
+		{"(NullInt32).Value", Method, 13, ""},
+		{"(NullInt64).Value", Method, 0, ""},
+		{"(NullString).Value", Method, 0, ""},
+		{"(NullTime).Value", Method, 13, ""},
+		{"ColumnType", Type, 8, ""},
+		{"Conn", Type, 9, ""},
+		{"DB", Type, 0, ""},
+		{"DBStats", Type, 5, ""},
+		{"DBStats.Idle", Field, 11, ""},
+		{"DBStats.InUse", Field, 11, ""},
+		{"DBStats.MaxIdleClosed", Field, 11, ""},
+		{"DBStats.MaxIdleTimeClosed", Field, 15, ""},
+		{"DBStats.MaxLifetimeClosed", Field, 11, ""},
+		{"DBStats.MaxOpenConnections", Field, 11, ""},
+		{"DBStats.OpenConnections", Field, 5, ""},
+		{"DBStats.WaitCount", Field, 11, ""},
+		{"DBStats.WaitDuration", Field, 11, ""},
+		{"Drivers", Func, 4, "func() []string"},
+		{"ErrConnDone", Var, 9, ""},
+		{"ErrNoRows", Var, 0, ""},
+		{"ErrTxDone", Var, 0, ""},
+		{"IsolationLevel", Type, 8, ""},
+		{"LevelDefault", Const, 8, ""},
+		{"LevelLinearizable", Const, 8, ""},
+		{"LevelReadCommitted", Const, 8, ""},
+		{"LevelReadUncommitted", Const, 8, ""},
+		{"LevelRepeatableRead", Const, 8, ""},
+		{"LevelSerializable", Const, 8, ""},
+		{"LevelSnapshot", Const, 8, ""},
+		{"LevelWriteCommitted", Const, 8, ""},
+		{"Named", Func, 8, "func(name string, value any) NamedArg"},
+		{"NamedArg", Type, 8, ""},
+		{"NamedArg.Name", Field, 8, ""},
+		{"NamedArg.Value", Field, 8, ""},
+		{"Null", Type, 22, ""},
+		{"Null.V", Field, 22, ""},
+		{"Null.Valid", Field, 22, ""},
+		{"NullBool", Type, 0, ""},
+		{"NullBool.Bool", Field, 0, ""},
+		{"NullBool.Valid", Field, 0, ""},
+		{"NullByte", Type, 17, ""},
+		{"NullByte.Byte", Field, 17, ""},
+		{"NullByte.Valid", Field, 17, ""},
+		{"NullFloat64", Type, 0, ""},
+		{"NullFloat64.Float64", Field, 0, ""},
+		{"NullFloat64.Valid", Field, 0, ""},
+		{"NullInt16", Type, 17, ""},
+		{"NullInt16.Int16", Field, 17, ""},
+		{"NullInt16.Valid", Field, 17, ""},
+		{"NullInt32", Type, 13, ""},
+		{"NullInt32.Int32", Field, 13, ""},
+		{"NullInt32.Valid", Field, 13, ""},
+		{"NullInt64", Type, 0, ""},
+		{"NullInt64.Int64", Field, 0, ""},
+		{"NullInt64.Valid", Field, 0, ""},
+		{"NullString", Type, 0, ""},
+		{"NullString.String", Field, 0, ""},
+		{"NullString.Valid", Field, 0, ""},
+		{"NullTime", Type, 13, ""},
+		{"NullTime.Time", Field, 13, ""},
+		{"NullTime.Valid", Field, 13, ""},
+		{"Open", Func, 0, "func(driverName string, dataSourceName string) (*DB, error)"},
+		{"OpenDB", Func, 10, "func(c driver.Connector) *DB"},
+		{"Out", Type, 9, ""},
+		{"Out.Dest", Field, 9, ""},
+		{"Out.In", Field, 9, ""},
+		{"RawBytes", Type, 0, ""},
+		{"Register", Func, 0, "func(name string, driver driver.Driver)"},
+		{"Result", Type, 0, ""},
+		{"Row", Type, 0, ""},
+		{"Rows", Type, 0, ""},
+		{"Scanner", Type, 0, ""},
+		{"Stmt", Type, 0, ""},
+		{"Tx", Type, 0, ""},
+		{"TxOptions", Type, 8, ""},
+		{"TxOptions.Isolation", Field, 8, ""},
+		{"TxOptions.ReadOnly", Field, 8, ""},
+	},
+	"database/sql/driver": {
+		{"(NotNull).ConvertValue", Method, 0, ""},
+		{"(Null).ConvertValue", Method, 0, ""},
+		{"(RowsAffected).LastInsertId", Method, 0, ""},
+		{"(RowsAffected).RowsAffected", Method, 0, ""},
+		{"Bool", Var, 0, ""},
+		{"ColumnConverter", Type, 0, ""},
+		{"Conn", Type, 0, ""},
+		{"ConnBeginTx", Type, 8, ""},
+		{"ConnPrepareContext", Type, 8, ""},
+		{"Connector", Type, 10, ""},
+		{"DefaultParameterConverter", Var, 0, ""},
+		{"Driver", Type, 0, ""},
+		{"DriverContext", Type, 10, ""},
+		{"ErrBadConn", Var, 0, ""},
+		{"ErrRemoveArgument", Var, 9, ""},
+		{"ErrSkip", Var, 0, ""},
+		{"Execer", Type, 0, ""},
+		{"ExecerContext", Type, 8, ""},
+		{"Int32", Var, 0, ""},
+		{"IsScanValue", Func, 0, "func(v any) bool"},
+		{"IsValue", Func, 0, "func(v any) bool"},
+		{"IsolationLevel", Type, 8, ""},
+		{"NamedValue", Type, 8, ""},
+		{"NamedValue.Name", Field, 8, ""},
+		{"NamedValue.Ordinal", Field, 8, ""},
+		{"NamedValue.Value", Field, 8, ""},
+		{"NamedValueChecker", Type, 9, ""},
+		{"NotNull", Type, 0, ""},
+		{"NotNull.Converter", Field, 0, ""},
+		{"Null", Type, 0, ""},
+		{"Null.Converter", Field, 0, ""},
+		{"Pinger", Type, 8, ""},
+		{"Queryer", Type, 1, ""},
+		{"QueryerContext", Type, 8, ""},
+		{"Result", Type, 0, ""},
+		{"ResultNoRows", Var, 0, ""},
+		{"Rows", Type, 0, ""},
+		{"RowsAffected", Type, 0, ""},
+		{"RowsColumnScanner", Type, 26, ""},
+		{"RowsColumnTypeDatabaseTypeName", Type, 8, ""},
+		{"RowsColumnTypeLength", Type, 8, ""},
+		{"RowsColumnTypeNullable", Type, 8, ""},
+		{"RowsColumnTypePrecisionScale", Type, 8, ""},
+		{"RowsColumnTypeScanType", Type, 8, ""},
+		{"RowsNextResultSet", Type, 8, ""},
+		{"SessionResetter", Type, 10, ""},
+		{"Stmt", Type, 0, ""},
+		{"StmtExecContext", Type, 8, ""},
+		{"StmtQueryContext", Type, 8, ""},
+		{"String", Var, 0, ""},
+		{"Tx", Type, 0, ""},
+		{"TxOptions", Type, 8, ""},
+		{"TxOptions.Isolation", Field, 8, ""},
+		{"TxOptions.ReadOnly", Field, 8, ""},
+		{"Validator", Type, 15, ""},
+		{"Value", Type, 0, ""},
+		{"ValueConverter", Type, 0, ""},
+		{"Valuer", Type, 0, ""},
+	},
+	"debug/buildinfo": {
+		{"BuildInfo", Type, 18, ""},
+		{"Read", Func, 18, "func(r io.ReaderAt) (*BuildInfo, error)"},
+		{"ReadFile", Func, 18, "func(name string) (info *BuildInfo, err error)"},
+	},
+	"debug/dwarf": {
+		{"(*AddrType).Basic", Method, 0, ""},
+		{"(*AddrType).Common", Method, 0, ""},
+		{"(*AddrType).Size", Method, 0, ""},
+		{"(*AddrType).String", Method, 0, ""},
+		{"(*ArrayType).Common", Method, 0, ""},
+		{"(*ArrayType).Size", Method, 0, ""},
+		{"(*ArrayType).String", Method, 0, ""},
+		{"(*BasicType).Basic", Method, 0, ""},
+		{"(*BasicType).Common", Method, 0, ""},
+		{"(*BasicType).Size", Method, 0, ""},
+		{"(*BasicType).String", Method, 0, ""},
+		{"(*BoolType).Basic", Method, 0, ""},
+		{"(*BoolType).Common", Method, 0, ""},
+		{"(*BoolType).Size", Method, 0, ""},
+		{"(*BoolType).String", Method, 0, ""},
+		{"(*CharType).Basic", Method, 0, ""},
+		{"(*CharType).Common", Method, 0, ""},
+		{"(*CharType).Size", Method, 0, ""},
+		{"(*CharType).String", Method, 0, ""},
+		{"(*CommonType).Common", Method, 0, ""},
+		{"(*CommonType).Size", Method, 0, ""},
+		{"(*ComplexType).Basic", Method, 0, ""},
+		{"(*ComplexType).Common", Method, 0, ""},
+		{"(*ComplexType).Size", Method, 0, ""},
+		{"(*ComplexType).String", Method, 0, ""},
+		{"(*Data).AddSection", Method, 14, ""},
+		{"(*Data).AddTypes", Method, 3, ""},
+		{"(*Data).LineReader", Method, 5, ""},
+		{"(*Data).Ranges", Method, 7, ""},
+		{"(*Data).Reader", Method, 0, ""},
+		{"(*Data).Type", Method, 0, ""},
+		{"(*DotDotDotType).Common", Method, 0, ""},
+		{"(*DotDotDotType).Size", Method, 0, ""},
+		{"(*DotDotDotType).String", Method, 0, ""},
+		{"(*Entry).AttrField", Method, 5, ""},
+		{"(*Entry).Val", Method, 0, ""},
+		{"(*EnumType).Common", Method, 0, ""},
+		{"(*EnumType).Size", Method, 0, ""},
+		{"(*EnumType).String", Method, 0, ""},
+		{"(*FloatType).Basic", Method, 0, ""},
+		{"(*FloatType).Common", Method, 0, ""},
+		{"(*FloatType).Size", Method, 0, ""},
+		{"(*FloatType).String", Method, 0, ""},
+		{"(*FuncType).Common", Method, 0, ""},
+		{"(*FuncType).Size", Method, 0, ""},
+		{"(*FuncType).String", Method, 0, ""},
+		{"(*IntType).Basic", Method, 0, ""},
+		{"(*IntType).Common", Method, 0, ""},
+		{"(*IntType).Size", Method, 0, ""},
+		{"(*IntType).String", Method, 0, ""},
+		{"(*LineReader).Files", Method, 14, ""},
+		{"(*LineReader).Next", Method, 5, ""},
+		{"(*LineReader).Reset", Method, 5, ""},
+		{"(*LineReader).Seek", Method, 5, ""},
+		{"(*LineReader).SeekPC", Method, 5, ""},
+		{"(*LineReader).Tell", Method, 5, ""},
+		{"(*PtrType).Common", Method, 0, ""},
+		{"(*PtrType).Size", Method, 0, ""},
+		{"(*PtrType).String", Method, 0, ""},
+		{"(*QualType).Common", Method, 0, ""},
+		{"(*QualType).Size", Method, 0, ""},
+		{"(*QualType).String", Method, 0, ""},
+		{"(*Reader).AddressSize", Method, 5, ""},
+		{"(*Reader).ByteOrder", Method, 14, ""},
+		{"(*Reader).Next", Method, 0, ""},
+		{"(*Reader).Seek", Method, 0, ""},
+		{"(*Reader).SeekPC", Method, 7, ""},
+		{"(*Reader).SkipChildren", Method, 0, ""},
+		{"(*StructType).Common", Method, 0, ""},
+		{"(*StructType).Defn", Method, 0, ""},
+		{"(*StructType).Size", Method, 0, ""},
+		{"(*StructType).String", Method, 0, ""},
+		{"(*TypedefType).Common", Method, 0, ""},
+		{"(*TypedefType).Size", Method, 0, ""},
+		{"(*TypedefType).String", Method, 0, ""},
+		{"(*UcharType).Basic", Method, 0, ""},
+		{"(*UcharType).Common", Method, 0, ""},
+		{"(*UcharType).Size", Method, 0, ""},
+		{"(*UcharType).String", Method, 0, ""},
+		{"(*UintType).Basic", Method, 0, ""},
+		{"(*UintType).Common", Method, 0, ""},
+		{"(*UintType).Size", Method, 0, ""},
+		{"(*UintType).String", Method, 0, ""},
+		{"(*UnspecifiedType).Basic", Method, 4, ""},
+		{"(*UnspecifiedType).Common", Method, 4, ""},
+		{"(*UnspecifiedType).Size", Method, 4, ""},
+		{"(*UnspecifiedType).String", Method, 4, ""},
+		{"(*UnsupportedType).Common", Method, 13, ""},
+		{"(*UnsupportedType).Size", Method, 13, ""},
+		{"(*UnsupportedType).String", Method, 13, ""},
+		{"(*VoidType).Common", Method, 0, ""},
+		{"(*VoidType).Size", Method, 0, ""},
+		{"(*VoidType).String", Method, 0, ""},
+		{"(Attr).GoString", Method, 0, ""},
+		{"(Attr).String", Method, 0, ""},
+		{"(Class).GoString", Method, 5, ""},
+		{"(Class).String", Method, 5, ""},
+		{"(DecodeError).Error", Method, 0, ""},
+		{"(Tag).GoString", Method, 0, ""},
+		{"(Tag).String", Method, 0, ""},
+		{"AddrType", Type, 0, ""},
+		{"AddrType.BasicType", Field, 0, ""},
+		{"ArrayType", Type, 0, ""},
+		{"ArrayType.CommonType", Field, 0, ""},
+		{"ArrayType.Count", Field, 0, ""},
+		{"ArrayType.StrideBitSize", Field, 0, ""},
+		{"ArrayType.Type", Field, 0, ""},
+		{"Attr", Type, 0, ""},
+		{"AttrAbstractOrigin", Const, 0, ""},
+		{"AttrAccessibility", Const, 0, ""},
+		{"AttrAddrBase", Const, 14, ""},
+		{"AttrAddrClass", Const, 0, ""},
+		{"AttrAlignment", Const, 14, ""},
+		{"AttrAllocated", Const, 0, ""},
+		{"AttrArtificial", Const, 0, ""},
+		{"AttrAssociated", Const, 0, ""},
+		{"AttrBaseTypes", Const, 0, ""},
+		{"AttrBinaryScale", Const, 14, ""},
+		{"AttrBitOffset", Const, 0, ""},
+		{"AttrBitSize", Const, 0, ""},
+		{"AttrByteSize", Const, 0, ""},
+		{"AttrCallAllCalls", Const, 14, ""},
+		{"AttrCallAllSourceCalls", Const, 14, ""},
+		{"AttrCallAllTailCalls", Const, 14, ""},
+		{"AttrCallColumn", Const, 0, ""},
+		{"AttrCallDataLocation", Const, 14, ""},
+		{"AttrCallDataValue", Const, 14, ""},
+		{"AttrCallFile", Const, 0, ""},
+		{"AttrCallLine", Const, 0, ""},
+		{"AttrCallOrigin", Const, 14, ""},
+		{"AttrCallPC", Const, 14, ""},
+		{"AttrCallParameter", Const, 14, ""},
+		{"AttrCallReturnPC", Const, 14, ""},
+		{"AttrCallTailCall", Const, 14, ""},
+		{"AttrCallTarget", Const, 14, ""},
+		{"AttrCallTargetClobbered", Const, 14, ""},
+		{"AttrCallValue", Const, 14, ""},
+		{"AttrCalling", Const, 0, ""},
+		{"AttrCommonRef", Const, 0, ""},
+		{"AttrCompDir", Const, 0, ""},
+		{"AttrConstExpr", Const, 14, ""},
+		{"AttrConstValue", Const, 0, ""},
+		{"AttrContainingType", Const, 0, ""},
+		{"AttrCount", Const, 0, ""},
+		{"AttrDataBitOffset", Const, 14, ""},
+		{"AttrDataLocation", Const, 0, ""},
+		{"AttrDataMemberLoc", Const, 0, ""},
+		{"AttrDecimalScale", Const, 14, ""},
+		{"AttrDecimalSign", Const, 14, ""},
+		{"AttrDeclColumn", Const, 0, ""},
+		{"AttrDeclFile", Const, 0, ""},
+		{"AttrDeclLine", Const, 0, ""},
+		{"AttrDeclaration", Const, 0, ""},
+		{"AttrDefaultValue", Const, 0, ""},
+		{"AttrDefaulted", Const, 14, ""},
+		{"AttrDeleted", Const, 14, ""},
+		{"AttrDescription", Const, 0, ""},
+		{"AttrDigitCount", Const, 14, ""},
+		{"AttrDiscr", Const, 0, ""},
+		{"AttrDiscrList", Const, 0, ""},
+		{"AttrDiscrValue", Const, 0, ""},
+		{"AttrDwoName", Const, 14, ""},
+		{"AttrElemental", Const, 14, ""},
+		{"AttrEncoding", Const, 0, ""},
+		{"AttrEndianity", Const, 14, ""},
+		{"AttrEntrypc", Const, 0, ""},
+		{"AttrEnumClass", Const, 14, ""},
+		{"AttrExplicit", Const, 14, ""},
+		{"AttrExportSymbols", Const, 14, ""},
+		{"AttrExtension", Const, 0, ""},
+		{"AttrExternal", Const, 0, ""},
+		{"AttrFrameBase", Const, 0, ""},
+		{"AttrFriend", Const, 0, ""},
+		{"AttrHighpc", Const, 0, ""},
+		{"AttrIdentifierCase", Const, 0, ""},
+		{"AttrImport", Const, 0, ""},
+		{"AttrInline", Const, 0, ""},
+		{"AttrIsOptional", Const, 0, ""},
+		{"AttrLanguage", Const, 0, ""},
+		{"AttrLinkageName", Const, 14, ""},
+		{"AttrLocation", Const, 0, ""},
+		{"AttrLoclistsBase", Const, 14, ""},
+		{"AttrLowerBound", Const, 0, ""},
+		{"AttrLowpc", Const, 0, ""},
+		{"AttrMacroInfo", Const, 0, ""},
+		{"AttrMacros", Const, 14, ""},
+		{"AttrMainSubprogram", Const, 14, ""},
+		{"AttrMutable", Const, 14, ""},
+		{"AttrName", Const, 0, ""},
+		{"AttrNamelistItem", Const, 0, ""},
+		{"AttrNoreturn", Const, 14, ""},
+		{"AttrObjectPointer", Const, 14, ""},
+		{"AttrOrdering", Const, 0, ""},
+		{"AttrPictureString", Const, 14, ""},
+		{"AttrPriority", Const, 0, ""},
+		{"AttrProducer", Const, 0, ""},
+		{"AttrPrototyped", Const, 0, ""},
+		{"AttrPure", Const, 14, ""},
+		{"AttrRanges", Const, 0, ""},
+		{"AttrRank", Const, 14, ""},
+		{"AttrRecursive", Const, 14, ""},
+		{"AttrReference", Const, 14, ""},
+		{"AttrReturnAddr", Const, 0, ""},
+		{"AttrRnglistsBase", Const, 14, ""},
+		{"AttrRvalueReference", Const, 14, ""},
+		{"AttrSegment", Const, 0, ""},
+		{"AttrSibling", Const, 0, ""},
+		{"AttrSignature", Const, 14, ""},
+		{"AttrSmall", Const, 14, ""},
+		{"AttrSpecification", Const, 0, ""},
+		{"AttrStartScope", Const, 0, ""},
+		{"AttrStaticLink", Const, 0, ""},
+		{"AttrStmtList", Const, 0, ""},
+		{"AttrStrOffsetsBase", Const, 14, ""},
+		{"AttrStride", Const, 0, ""},
+		{"AttrStrideSize", Const, 0, ""},
+		{"AttrStringLength", Const, 0, ""},
+		{"AttrStringLengthBitSize", Const, 14, ""},
+		{"AttrStringLengthByteSize", Const, 14, ""},
+		{"AttrThreadsScaled", Const, 14, ""},
+		{"AttrTrampoline", Const, 0, ""},
+		{"AttrType", Const, 0, ""},
+		{"AttrUpperBound", Const, 0, ""},
+		{"AttrUseLocation", Const, 0, ""},
+		{"AttrUseUTF8", Const, 0, ""},
+		{"AttrVarParam", Const, 0, ""},
+		{"AttrVirtuality", Const, 0, ""},
+		{"AttrVisibility", Const, 0, ""},
+		{"AttrVtableElemLoc", Const, 0, ""},
+		{"BasicType", Type, 0, ""},
+		{"BasicType.BitOffset", Field, 0, ""},
+		{"BasicType.BitSize", Field, 0, ""},
+		{"BasicType.CommonType", Field, 0, ""},
+		{"BasicType.DataBitOffset", Field, 18, ""},
+		{"BoolType", Type, 0, ""},
+		{"BoolType.BasicType", Field, 0, ""},
+		{"CharType", Type, 0, ""},
+		{"CharType.BasicType", Field, 0, ""},
+		{"Class", Type, 5, ""},
+		{"ClassAddrPtr", Const, 14, ""},
+		{"ClassAddress", Const, 5, ""},
+		{"ClassBlock", Const, 5, ""},
+		{"ClassConstant", Const, 5, ""},
+		{"ClassExprLoc", Const, 5, ""},
+		{"ClassFlag", Const, 5, ""},
+		{"ClassLinePtr", Const, 5, ""},
+		{"ClassLocList", Const, 14, ""},
+		{"ClassLocListPtr", Const, 5, ""},
+		{"ClassMacPtr", Const, 5, ""},
+		{"ClassRangeListPtr", Const, 5, ""},
+		{"ClassReference", Const, 5, ""},
+		{"ClassReferenceAlt", Const, 5, ""},
+		{"ClassReferenceSig", Const, 5, ""},
+		{"ClassRngList", Const, 14, ""},
+		{"ClassRngListsPtr", Const, 14, ""},
+		{"ClassStrOffsetsPtr", Const, 14, ""},
+		{"ClassString", Const, 5, ""},
+		{"ClassStringAlt", Const, 5, ""},
+		{"ClassUnknown", Const, 6, ""},
+		{"CommonType", Type, 0, ""},
+		{"CommonType.ByteSize", Field, 0, ""},
+		{"CommonType.Name", Field, 0, ""},
+		{"ComplexType", Type, 0, ""},
+		{"ComplexType.BasicType", Field, 0, ""},
+		{"Data", Type, 0, ""},
+		{"DecodeError", Type, 0, ""},
+		{"DecodeError.Err", Field, 0, ""},
+		{"DecodeError.Name", Field, 0, ""},
+		{"DecodeError.Offset", Field, 0, ""},
+		{"DotDotDotType", Type, 0, ""},
+		{"DotDotDotType.CommonType", Field, 0, ""},
+		{"Entry", Type, 0, ""},
+		{"Entry.Children", Field, 0, ""},
+		{"Entry.Field", Field, 0, ""},
+		{"Entry.Offset", Field, 0, ""},
+		{"Entry.Tag", Field, 0, ""},
+		{"EnumType", Type, 0, ""},
+		{"EnumType.CommonType", Field, 0, ""},
+		{"EnumType.EnumName", Field, 0, ""},
+		{"EnumType.Val", Field, 0, ""},
+		{"EnumValue", Type, 0, ""},
+		{"EnumValue.Name", Field, 0, ""},
+		{"EnumValue.Val", Field, 0, ""},
+		{"ErrUnknownPC", Var, 5, ""},
+		{"Field", Type, 0, ""},
+		{"Field.Attr", Field, 0, ""},
+		{"Field.Class", Field, 5, ""},
+		{"Field.Val", Field, 0, ""},
+		{"FloatType", Type, 0, ""},
+		{"FloatType.BasicType", Field, 0, ""},
+		{"FuncType", Type, 0, ""},
+		{"FuncType.CommonType", Field, 0, ""},
+		{"FuncType.ParamType", Field, 0, ""},
+		{"FuncType.ReturnType", Field, 0, ""},
+		{"IntType", Type, 0, ""},
+		{"IntType.BasicType", Field, 0, ""},
+		{"LineEntry", Type, 5, ""},
+		{"LineEntry.Address", Field, 5, ""},
+		{"LineEntry.BasicBlock", Field, 5, ""},
+		{"LineEntry.Column", Field, 5, ""},
+		{"LineEntry.Discriminator", Field, 5, ""},
+		{"LineEntry.EndSequence", Field, 5, ""},
+		{"LineEntry.EpilogueBegin", Field, 5, ""},
+		{"LineEntry.File", Field, 5, ""},
+		{"LineEntry.ISA", Field, 5, ""},
+		{"LineEntry.IsStmt", Field, 5, ""},
+		{"LineEntry.Line", Field, 5, ""},
+		{"LineEntry.OpIndex", Field, 5, ""},
+		{"LineEntry.PrologueEnd", Field, 5, ""},
+		{"LineFile", Type, 5, ""},
+		{"LineFile.Length", Field, 5, ""},
+		{"LineFile.Mtime", Field, 5, ""},
+		{"LineFile.Name", Field, 5, ""},
+		{"LineReader", Type, 5, ""},
+		{"LineReaderPos", Type, 5, ""},
+		{"New", Func, 0, "func(abbrev []byte, aranges []byte, frame []byte, info []byte, line []byte, pubnames []byte, ranges []byte, str []byte) (*Data, error)"},
+		{"Offset", Type, 0, ""},
+		{"PtrType", Type, 0, ""},
+		{"PtrType.CommonType", Field, 0, ""},
+		{"PtrType.Type", Field, 0, ""},
+		{"QualType", Type, 0, ""},
+		{"QualType.CommonType", Field, 0, ""},
+		{"QualType.Qual", Field, 0, ""},
+		{"QualType.Type", Field, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"StructField", Type, 0, ""},
+		{"StructField.BitOffset", Field, 0, ""},
+		{"StructField.BitSize", Field, 0, ""},
+		{"StructField.ByteOffset", Field, 0, ""},
+		{"StructField.ByteSize", Field, 0, ""},
+		{"StructField.DataBitOffset", Field, 18, ""},
+		{"StructField.Name", Field, 0, ""},
+		{"StructField.Type", Field, 0, ""},
+		{"StructType", Type, 0, ""},
+		{"StructType.CommonType", Field, 0, ""},
+		{"StructType.Field", Field, 0, ""},
+		{"StructType.Incomplete", Field, 0, ""},
+		{"StructType.Kind", Field, 0, ""},
+		{"StructType.StructName", Field, 0, ""},
+		{"Tag", Type, 0, ""},
+		{"TagAccessDeclaration", Const, 0, ""},
+		{"TagArrayType", Const, 0, ""},
+		{"TagAtomicType", Const, 14, ""},
+		{"TagBaseType", Const, 0, ""},
+		{"TagCallSite", Const, 14, ""},
+		{"TagCallSiteParameter", Const, 14, ""},
+		{"TagCatchDwarfBlock", Const, 0, ""},
+		{"TagClassType", Const, 0, ""},
+		{"TagCoarrayType", Const, 14, ""},
+		{"TagCommonDwarfBlock", Const, 0, ""},
+		{"TagCommonInclusion", Const, 0, ""},
+		{"TagCompileUnit", Const, 0, ""},
+		{"TagCondition", Const, 3, ""},
+		{"TagConstType", Const, 0, ""},
+		{"TagConstant", Const, 0, ""},
+		{"TagDwarfProcedure", Const, 0, ""},
+		{"TagDynamicType", Const, 14, ""},
+		{"TagEntryPoint", Const, 0, ""},
+		{"TagEnumerationType", Const, 0, ""},
+		{"TagEnumerator", Const, 0, ""},
+		{"TagFileType", Const, 0, ""},
+		{"TagFormalParameter", Const, 0, ""},
+		{"TagFriend", Const, 0, ""},
+		{"TagGenericSubrange", Const, 14, ""},
+		{"TagImmutableType", Const, 14, ""},
+		{"TagImportedDeclaration", Const, 0, ""},
+		{"TagImportedModule", Const, 0, ""},
+		{"TagImportedUnit", Const, 0, ""},
+		{"TagInheritance", Const, 0, ""},
+		{"TagInlinedSubroutine", Const, 0, ""},
+		{"TagInterfaceType", Const, 0, ""},
+		{"TagLabel", Const, 0, ""},
+		{"TagLexDwarfBlock", Const, 0, ""},
+		{"TagMember", Const, 0, ""},
+		{"TagModule", Const, 0, ""},
+		{"TagMutableType", Const, 0, ""},
+		{"TagNamelist", Const, 0, ""},
+		{"TagNamelistItem", Const, 0, ""},
+		{"TagNamespace", Const, 0, ""},
+		{"TagPackedType", Const, 0, ""},
+		{"TagPartialUnit", Const, 0, ""},
+		{"TagPointerType", Const, 0, ""},
+		{"TagPtrToMemberType", Const, 0, ""},
+		{"TagReferenceType", Const, 0, ""},
+		{"TagRestrictType", Const, 0, ""},
+		{"TagRvalueReferenceType", Const, 3, ""},
+		{"TagSetType", Const, 0, ""},
+		{"TagSharedType", Const, 3, ""},
+		{"TagSkeletonUnit", Const, 14, ""},
+		{"TagStringType", Const, 0, ""},
+		{"TagStructType", Const, 0, ""},
+		{"TagSubprogram", Const, 0, ""},
+		{"TagSubrangeType", Const, 0, ""},
+		{"TagSubroutineType", Const, 0, ""},
+		{"TagTemplateAlias", Const, 3, ""},
+		{"TagTemplateTypeParameter", Const, 0, ""},
+		{"TagTemplateValueParameter", Const, 0, ""},
+		{"TagThrownType", Const, 0, ""},
+		{"TagTryDwarfBlock", Const, 0, ""},
+		{"TagTypeUnit", Const, 3, ""},
+		{"TagTypedef", Const, 0, ""},
+		{"TagUnionType", Const, 0, ""},
+		{"TagUnspecifiedParameters", Const, 0, ""},
+		{"TagUnspecifiedType", Const, 0, ""},
+		{"TagVariable", Const, 0, ""},
+		{"TagVariant", Const, 0, ""},
+		{"TagVariantPart", Const, 0, ""},
+		{"TagVolatileType", Const, 0, ""},
+		{"TagWithStmt", Const, 0, ""},
+		{"Type", Type, 0, ""},
+		{"TypedefType", Type, 0, ""},
+		{"TypedefType.CommonType", Field, 0, ""},
+		{"TypedefType.Type", Field, 0, ""},
+		{"UcharType", Type, 0, ""},
+		{"UcharType.BasicType", Field, 0, ""},
+		{"UintType", Type, 0, ""},
+		{"UintType.BasicType", Field, 0, ""},
+		{"UnspecifiedType", Type, 4, ""},
+		{"UnspecifiedType.BasicType", Field, 4, ""},
+		{"UnsupportedType", Type, 13, ""},
+		{"UnsupportedType.CommonType", Field, 13, ""},
+		{"UnsupportedType.Tag", Field, 13, ""},
+		{"VoidType", Type, 0, ""},
+		{"VoidType.CommonType", Field, 0, ""},
+	},
+	"debug/elf": {
+		{"(*File).Close", Method, 0, ""},
+		{"(*File).DWARF", Method, 0, ""},
+		{"(*File).DynString", Method, 1, ""},
+		{"(*File).DynValue", Method, 21, ""},
+		{"(*File).DynamicSymbols", Method, 4, ""},
+		{"(*File).DynamicVersionNeeds", Method, 24, ""},
+		{"(*File).DynamicVersions", Method, 24, ""},
+		{"(*File).ImportedLibraries", Method, 0, ""},
+		{"(*File).ImportedSymbols", Method, 0, ""},
+		{"(*File).Section", Method, 0, ""},
+		{"(*File).SectionByType", Method, 0, ""},
+		{"(*File).Symbols", Method, 0, ""},
+		{"(*FormatError).Error", Method, 0, ""},
+		{"(*Prog).Open", Method, 0, ""},
+		{"(*Section).Data", Method, 0, ""},
+		{"(*Section).Open", Method, 0, ""},
+		{"(Class).GoString", Method, 0, ""},
+		{"(Class).String", Method, 0, ""},
+		{"(CompressionType).GoString", Method, 6, ""},
+		{"(CompressionType).String", Method, 6, ""},
+		{"(Data).GoString", Method, 0, ""},
+		{"(Data).String", Method, 0, ""},
+		{"(DynFlag).GoString", Method, 0, ""},
+		{"(DynFlag).String", Method, 0, ""},
+		{"(DynFlag1).GoString", Method, 21, ""},
+		{"(DynFlag1).String", Method, 21, ""},
+		{"(DynTag).GoString", Method, 0, ""},
+		{"(DynTag).String", Method, 0, ""},
+		{"(Machine).GoString", Method, 0, ""},
+		{"(Machine).String", Method, 0, ""},
+		{"(NType).GoString", Method, 0, ""},
+		{"(NType).String", Method, 0, ""},
+		{"(OSABI).GoString", Method, 0, ""},
+		{"(OSABI).String", Method, 0, ""},
+		{"(Prog).ReadAt", Method, 0, ""},
+		{"(ProgFlag).GoString", Method, 0, ""},
+		{"(ProgFlag).String", Method, 0, ""},
+		{"(ProgType).GoString", Method, 0, ""},
+		{"(ProgType).String", Method, 0, ""},
+		{"(R_386).GoString", Method, 0, ""},
+		{"(R_386).String", Method, 0, ""},
+		{"(R_390).GoString", Method, 7, ""},
+		{"(R_390).String", Method, 7, ""},
+		{"(R_AARCH64).GoString", Method, 4, ""},
+		{"(R_AARCH64).String", Method, 4, ""},
+		{"(R_ALPHA).GoString", Method, 0, ""},
+		{"(R_ALPHA).String", Method, 0, ""},
+		{"(R_ARM).GoString", Method, 0, ""},
+		{"(R_ARM).String", Method, 0, ""},
+		{"(R_LARCH).GoString", Method, 19, ""},
+		{"(R_LARCH).String", Method, 19, ""},
+		{"(R_MIPS).GoString", Method, 6, ""},
+		{"(R_MIPS).String", Method, 6, ""},
+		{"(R_PPC).GoString", Method, 0, ""},
+		{"(R_PPC).String", Method, 0, ""},
+		{"(R_PPC64).GoString", Method, 5, ""},
+		{"(R_PPC64).String", Method, 5, ""},
+		{"(R_RISCV).GoString", Method, 11, ""},
+		{"(R_RISCV).String", Method, 11, ""},
+		{"(R_SPARC).GoString", Method, 0, ""},
+		{"(R_SPARC).String", Method, 0, ""},
+		{"(R_X86_64).GoString", Method, 0, ""},
+		{"(R_X86_64).String", Method, 0, ""},
+		{"(Section).ReadAt", Method, 0, ""},
+		{"(SectionFlag).GoString", Method, 0, ""},
+		{"(SectionFlag).String", Method, 0, ""},
+		{"(SectionIndex).GoString", Method, 0, ""},
+		{"(SectionIndex).String", Method, 0, ""},
+		{"(SectionType).GoString", Method, 0, ""},
+		{"(SectionType).String", Method, 0, ""},
+		{"(SymBind).GoString", Method, 0, ""},
+		{"(SymBind).String", Method, 0, ""},
+		{"(SymType).GoString", Method, 0, ""},
+		{"(SymType).String", Method, 0, ""},
+		{"(SymVis).GoString", Method, 0, ""},
+		{"(SymVis).String", Method, 0, ""},
+		{"(Type).GoString", Method, 0, ""},
+		{"(Type).String", Method, 0, ""},
+		{"(Version).GoString", Method, 0, ""},
+		{"(Version).String", Method, 0, ""},
+		{"(VersionIndex).Index", Method, 24, ""},
+		{"(VersionIndex).IsHidden", Method, 24, ""},
+		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0, ""},
+		{"COMPRESS_HIOS", Const, 6, ""},
+		{"COMPRESS_HIPROC", Const, 6, ""},
+		{"COMPRESS_LOOS", Const, 6, ""},
+		{"COMPRESS_LOPROC", Const, 6, ""},
+		{"COMPRESS_ZLIB", Const, 6, ""},
+		{"COMPRESS_ZSTD", Const, 21, ""},
+		{"Chdr32", Type, 6, ""},
+		{"Chdr32.Addralign", Field, 6, ""},
+		{"Chdr32.Size", Field, 6, ""},
+		{"Chdr32.Type", Field, 6, ""},
+		{"Chdr64", Type, 6, ""},
+		{"Chdr64.Addralign", Field, 6, ""},
+		{"Chdr64.Size", Field, 6, ""},
+		{"Chdr64.Type", Field, 6, ""},
+		{"Class", Type, 0, ""},
+		{"CompressionType", Type, 6, ""},
+		{"DF_1_CONFALT", Const, 21, ""},
+		{"DF_1_DIRECT", Const, 21, ""},
+		{"DF_1_DISPRELDNE", Const, 21, ""},
+		{"DF_1_DISPRELPND", Const, 21, ""},
+		{"DF_1_EDITED", Const, 21, ""},
+		{"DF_1_ENDFILTEE", Const, 21, ""},
+		{"DF_1_GLOBAL", Const, 21, ""},
+		{"DF_1_GLOBAUDIT", Const, 21, ""},
+		{"DF_1_GROUP", Const, 21, ""},
+		{"DF_1_IGNMULDEF", Const, 21, ""},
+		{"DF_1_INITFIRST", Const, 21, ""},
+		{"DF_1_INTERPOSE", Const, 21, ""},
+		{"DF_1_KMOD", Const, 21, ""},
+		{"DF_1_LOADFLTR", Const, 21, ""},
+		{"DF_1_NOCOMMON", Const, 21, ""},
+		{"DF_1_NODEFLIB", Const, 21, ""},
+		{"DF_1_NODELETE", Const, 21, ""},
+		{"DF_1_NODIRECT", Const, 21, ""},
+		{"DF_1_NODUMP", Const, 21, ""},
+		{"DF_1_NOHDR", Const, 21, ""},
+		{"DF_1_NOKSYMS", Const, 21, ""},
+		{"DF_1_NOOPEN", Const, 21, ""},
+		{"DF_1_NORELOC", Const, 21, ""},
+		{"DF_1_NOW", Const, 21, ""},
+		{"DF_1_ORIGIN", Const, 21, ""},
+		{"DF_1_PIE", Const, 21, ""},
+		{"DF_1_SINGLETON", Const, 21, ""},
+		{"DF_1_STUB", Const, 21, ""},
+		{"DF_1_SYMINTPOSE", Const, 21, ""},
+		{"DF_1_TRANS", Const, 21, ""},
+		{"DF_1_WEAKFILTER", Const, 21, ""},
+		{"DF_BIND_NOW", Const, 0, ""},
+		{"DF_ORIGIN", Const, 0, ""},
+		{"DF_STATIC_TLS", Const, 0, ""},
+		{"DF_SYMBOLIC", Const, 0, ""},
+		{"DF_TEXTREL", Const, 0, ""},
+		{"DT_ADDRRNGHI", Const, 16, ""},
+		{"DT_ADDRRNGLO", Const, 16, ""},
+		{"DT_AUDIT", Const, 16, ""},
+		{"DT_AUXILIARY", Const, 16, ""},
+		{"DT_BIND_NOW", Const, 0, ""},
+		{"DT_CHECKSUM", Const, 16, ""},
+		{"DT_CONFIG", Const, 16, ""},
+		{"DT_DEBUG", Const, 0, ""},
+		{"DT_DEPAUDIT", Const, 16, ""},
+		{"DT_ENCODING", Const, 0, ""},
+		{"DT_FEATURE", Const, 16, ""},
+		{"DT_FILTER", Const, 16, ""},
+		{"DT_FINI", Const, 0, ""},
+		{"DT_FINI_ARRAY", Const, 0, ""},
+		{"DT_FINI_ARRAYSZ", Const, 0, ""},
+		{"DT_FLAGS", Const, 0, ""},
+		{"DT_FLAGS_1", Const, 16, ""},
+		{"DT_GNU_CONFLICT", Const, 16, ""},
+		{"DT_GNU_CONFLICTSZ", Const, 16, ""},
+		{"DT_GNU_HASH", Const, 16, ""},
+		{"DT_GNU_LIBLIST", Const, 16, ""},
+		{"DT_GNU_LIBLISTSZ", Const, 16, ""},
+		{"DT_GNU_PRELINKED", Const, 16, ""},
+		{"DT_HASH", Const, 0, ""},
+		{"DT_HIOS", Const, 0, ""},
+		{"DT_HIPROC", Const, 0, ""},
+		{"DT_INIT", Const, 0, ""},
+		{"DT_INIT_ARRAY", Const, 0, ""},
+		{"DT_INIT_ARRAYSZ", Const, 0, ""},
+		{"DT_JMPREL", Const, 0, ""},
+		{"DT_LOOS", Const, 0, ""},
+		{"DT_LOPROC", Const, 0, ""},
+		{"DT_MIPS_AUX_DYNAMIC", Const, 16, ""},
+		{"DT_MIPS_BASE_ADDRESS", Const, 16, ""},
+		{"DT_MIPS_COMPACT_SIZE", Const, 16, ""},
+		{"DT_MIPS_CONFLICT", Const, 16, ""},
+		{"DT_MIPS_CONFLICTNO", Const, 16, ""},
+		{"DT_MIPS_CXX_FLAGS", Const, 16, ""},
+		{"DT_MIPS_DELTA_CLASS", Const, 16, ""},
+		{"DT_MIPS_DELTA_CLASSSYM", Const, 16, ""},
+		{"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16, ""},
+		{"DT_MIPS_DELTA_CLASS_NO", Const, 16, ""},
+		{"DT_MIPS_DELTA_INSTANCE", Const, 16, ""},
+		{"DT_MIPS_DELTA_INSTANCE_NO", Const, 16, ""},
+		{"DT_MIPS_DELTA_RELOC", Const, 16, ""},
+		{"DT_MIPS_DELTA_RELOC_NO", Const, 16, ""},
+		{"DT_MIPS_DELTA_SYM", Const, 16, ""},
+		{"DT_MIPS_DELTA_SYM_NO", Const, 16, ""},
+		{"DT_MIPS_DYNSTR_ALIGN", Const, 16, ""},
+		{"DT_MIPS_FLAGS", Const, 16, ""},
+		{"DT_MIPS_GOTSYM", Const, 16, ""},
+		{"DT_MIPS_GP_VALUE", Const, 16, ""},
+		{"DT_MIPS_HIDDEN_GOTIDX", Const, 16, ""},
+		{"DT_MIPS_HIPAGENO", Const, 16, ""},
+		{"DT_MIPS_ICHECKSUM", Const, 16, ""},
+		{"DT_MIPS_INTERFACE", Const, 16, ""},
+		{"DT_MIPS_INTERFACE_SIZE", Const, 16, ""},
+		{"DT_MIPS_IVERSION", Const, 16, ""},
+		{"DT_MIPS_LIBLIST", Const, 16, ""},
+		{"DT_MIPS_LIBLISTNO", Const, 16, ""},
+		{"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16, ""},
+		{"DT_MIPS_LOCAL_GOTIDX", Const, 16, ""},
+		{"DT_MIPS_LOCAL_GOTNO", Const, 16, ""},
+		{"DT_MIPS_MSYM", Const, 16, ""},
+		{"DT_MIPS_OPTIONS", Const, 16, ""},
+		{"DT_MIPS_PERF_SUFFIX", Const, 16, ""},
+		{"DT_MIPS_PIXIE_INIT", Const, 16, ""},
+		{"DT_MIPS_PLTGOT", Const, 16, ""},
+		{"DT_MIPS_PROTECTED_GOTIDX", Const, 16, ""},
+		{"DT_MIPS_RLD_MAP", Const, 16, ""},
+		{"DT_MIPS_RLD_MAP_REL", Const, 16, ""},
+		{"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16, ""},
+		{"DT_MIPS_RLD_VERSION", Const, 16, ""},
+		{"DT_MIPS_RWPLT", Const, 16, ""},
+		{"DT_MIPS_SYMBOL_LIB", Const, 16, ""},
+		{"DT_MIPS_SYMTABNO", Const, 16, ""},
+		{"DT_MIPS_TIME_STAMP", Const, 16, ""},
+		{"DT_MIPS_UNREFEXTNO", Const, 16, ""},
+		{"DT_MOVEENT", Const, 16, ""},
+		{"DT_MOVESZ", Const, 16, ""},
+		{"DT_MOVETAB", Const, 16, ""},
+		{"DT_NEEDED", Const, 0, ""},
+		{"DT_NULL", Const, 0, ""},
+		{"DT_PLTGOT", Const, 0, ""},
+		{"DT_PLTPAD", Const, 16, ""},
+		{"DT_PLTPADSZ", Const, 16, ""},
+		{"DT_PLTREL", Const, 0, ""},
+		{"DT_PLTRELSZ", Const, 0, ""},
+		{"DT_POSFLAG_1", Const, 16, ""},
+		{"DT_PPC64_GLINK", Const, 16, ""},
+		{"DT_PPC64_OPD", Const, 16, ""},
+		{"DT_PPC64_OPDSZ", Const, 16, ""},
+		{"DT_PPC64_OPT", Const, 16, ""},
+		{"DT_PPC_GOT", Const, 16, ""},
+		{"DT_PPC_OPT", Const, 16, ""},
+		{"DT_PREINIT_ARRAY", Const, 0, ""},
+		{"DT_PREINIT_ARRAYSZ", Const, 0, ""},
+		{"DT_REL", Const, 0, ""},
+		{"DT_RELA", Const, 0, ""},
+		{"DT_RELACOUNT", Const, 16, ""},
+		{"DT_RELAENT", Const, 0, ""},
+		{"DT_RELASZ", Const, 0, ""},
+		{"DT_RELCOUNT", Const, 16, ""},
+		{"DT_RELENT", Const, 0, ""},
+		{"DT_RELSZ", Const, 0, ""},
+		{"DT_RPATH", Const, 0, ""},
+		{"DT_RUNPATH", Const, 0, ""},
+		{"DT_SONAME", Const, 0, ""},
+		{"DT_SPARC_REGISTER", Const, 16, ""},
+		{"DT_STRSZ", Const, 0, ""},
+		{"DT_STRTAB", Const, 0, ""},
+		{"DT_SYMBOLIC", Const, 0, ""},
+		{"DT_SYMENT", Const, 0, ""},
+		{"DT_SYMINENT", Const, 16, ""},
+		{"DT_SYMINFO", Const, 16, ""},
+		{"DT_SYMINSZ", Const, 16, ""},
+		{"DT_SYMTAB", Const, 0, ""},
+		{"DT_SYMTAB_SHNDX", Const, 16, ""},
+		{"DT_TEXTREL", Const, 0, ""},
+		{"DT_TLSDESC_GOT", Const, 16, ""},
+		{"DT_TLSDESC_PLT", Const, 16, ""},
+		{"DT_USED", Const, 16, ""},
+		{"DT_VALRNGHI", Const, 16, ""},
+		{"DT_VALRNGLO", Const, 16, ""},
+		{"DT_VERDEF", Const, 16, ""},
+		{"DT_VERDEFNUM", Const, 16, ""},
+		{"DT_VERNEED", Const, 0, ""},
+		{"DT_VERNEEDNUM", Const, 0, ""},
+		{"DT_VERSYM", Const, 0, ""},
+		{"Data", Type, 0, ""},
+		{"Dyn32", Type, 0, ""},
+		{"Dyn32.Tag", Field, 0, ""},
+		{"Dyn32.Val", Field, 0, ""},
+		{"Dyn64", Type, 0, ""},
+		{"Dyn64.Tag", Field, 0, ""},
+		{"Dyn64.Val", Field, 0, ""},
+		{"DynFlag", Type, 0, ""},
+		{"DynFlag1", Type, 21, ""},
+		{"DynTag", Type, 0, ""},
+		{"DynamicVersion", Type, 24, ""},
+		{"DynamicVersion.Deps", Field, 24, ""},
+		{"DynamicVersion.Flags", Field, 24, ""},
+		{"DynamicVersion.Index", Field, 24, ""},
+		{"DynamicVersion.Name", Field, 24, ""},
+		{"DynamicVersionDep", Type, 24, ""},
+		{"DynamicVersionDep.Dep", Field, 24, ""},
+		{"DynamicVersionDep.Flags", Field, 24, ""},
+		{"DynamicVersionDep.Index", Field, 24, ""},
+		{"DynamicVersionFlag", Type, 24, ""},
+		{"DynamicVersionNeed", Type, 24, ""},
+		{"DynamicVersionNeed.Name", Field, 24, ""},
+		{"DynamicVersionNeed.Needs", Field, 24, ""},
+		{"EI_ABIVERSION", Const, 0, ""},
+		{"EI_CLASS", Const, 0, ""},
+		{"EI_DATA", Const, 0, ""},
+		{"EI_NIDENT", Const, 0, ""},
+		{"EI_OSABI", Const, 0, ""},
+		{"EI_PAD", Const, 0, ""},
+		{"EI_VERSION", Const, 0, ""},
+		{"ELFCLASS32", Const, 0, ""},
+		{"ELFCLASS64", Const, 0, ""},
+		{"ELFCLASSNONE", Const, 0, ""},
+		{"ELFDATA2LSB", Const, 0, ""},
+		{"ELFDATA2MSB", Const, 0, ""},
+		{"ELFDATANONE", Const, 0, ""},
+		{"ELFMAG", Const, 0, ""},
+		{"ELFOSABI_86OPEN", Const, 0, ""},
+		{"ELFOSABI_AIX", Const, 0, ""},
+		{"ELFOSABI_ARM", Const, 0, ""},
+		{"ELFOSABI_AROS", Const, 11, ""},
+		{"ELFOSABI_CLOUDABI", Const, 11, ""},
+		{"ELFOSABI_FENIXOS", Const, 11, ""},
+		{"ELFOSABI_FREEBSD", Const, 0, ""},
+		{"ELFOSABI_HPUX", Const, 0, ""},
+		{"ELFOSABI_HURD", Const, 0, ""},
+		{"ELFOSABI_IRIX", Const, 0, ""},
+		{"ELFOSABI_LINUX", Const, 0, ""},
+		{"ELFOSABI_MODESTO", Const, 0, ""},
+		{"ELFOSABI_NETBSD", Const, 0, ""},
+		{"ELFOSABI_NONE", Const, 0, ""},
+		{"ELFOSABI_NSK", Const, 0, ""},
+		{"ELFOSABI_OPENBSD", Const, 0, ""},
+		{"ELFOSABI_OPENVMS", Const, 0, ""},
+		{"ELFOSABI_SOLARIS", Const, 0, ""},
+		{"ELFOSABI_STANDALONE", Const, 0, ""},
+		{"ELFOSABI_TRU64", Const, 0, ""},
+		{"EM_386", Const, 0, ""},
+		{"EM_486", Const, 0, ""},
+		{"EM_56800EX", Const, 11, ""},
+		{"EM_68HC05", Const, 11, ""},
+		{"EM_68HC08", Const, 11, ""},
+		{"EM_68HC11", Const, 11, ""},
+		{"EM_68HC12", Const, 0, ""},
+		{"EM_68HC16", Const, 11, ""},
+		{"EM_68K", Const, 0, ""},
+		{"EM_78KOR", Const, 11, ""},
+		{"EM_8051", Const, 11, ""},
+		{"EM_860", Const, 0, ""},
+		{"EM_88K", Const, 0, ""},
+		{"EM_960", Const, 0, ""},
+		{"EM_AARCH64", Const, 4, ""},
+		{"EM_ALPHA", Const, 0, ""},
+		{"EM_ALPHA_STD", Const, 0, ""},
+		{"EM_ALTERA_NIOS2", Const, 11, ""},
+		{"EM_AMDGPU", Const, 11, ""},
+		{"EM_ARC", Const, 0, ""},
+		{"EM_ARCA", Const, 11, ""},
+		{"EM_ARC_COMPACT", Const, 11, ""},
+		{"EM_ARC_COMPACT2", Const, 11, ""},
+		{"EM_ARM", Const, 0, ""},
+		{"EM_AVR", Const, 11, ""},
+		{"EM_AVR32", Const, 11, ""},
+		{"EM_BA1", Const, 11, ""},
+		{"EM_BA2", Const, 11, ""},
+		{"EM_BLACKFIN", Const, 11, ""},
+		{"EM_BPF", Const, 11, ""},
+		{"EM_C166", Const, 11, ""},
+		{"EM_CDP", Const, 11, ""},
+		{"EM_CE", Const, 11, ""},
+		{"EM_CLOUDSHIELD", Const, 11, ""},
+		{"EM_COGE", Const, 11, ""},
+		{"EM_COLDFIRE", Const, 0, ""},
+		{"EM_COOL", Const, 11, ""},
+		{"EM_COREA_1ST", Const, 11, ""},
+		{"EM_COREA_2ND", Const, 11, ""},
+		{"EM_CR", Const, 11, ""},
+		{"EM_CR16", Const, 11, ""},
+		{"EM_CRAYNV2", Const, 11, ""},
+		{"EM_CRIS", Const, 11, ""},
+		{"EM_CRX", Const, 11, ""},
+		{"EM_CSR_KALIMBA", Const, 11, ""},
+		{"EM_CUDA", Const, 11, ""},
+		{"EM_CYPRESS_M8C", Const, 11, ""},
+		{"EM_D10V", Const, 11, ""},
+		{"EM_D30V", Const, 11, ""},
+		{"EM_DSP24", Const, 11, ""},
+		{"EM_DSPIC30F", Const, 11, ""},
+		{"EM_DXP", Const, 11, ""},
+		{"EM_ECOG1", Const, 11, ""},
+		{"EM_ECOG16", Const, 11, ""},
+		{"EM_ECOG1X", Const, 11, ""},
+		{"EM_ECOG2", Const, 11, ""},
+		{"EM_ETPU", Const, 11, ""},
+		{"EM_EXCESS", Const, 11, ""},
+		{"EM_F2MC16", Const, 11, ""},
+		{"EM_FIREPATH", Const, 11, ""},
+		{"EM_FR20", Const, 0, ""},
+		{"EM_FR30", Const, 11, ""},
+		{"EM_FT32", Const, 11, ""},
+		{"EM_FX66", Const, 11, ""},
+		{"EM_H8S", Const, 0, ""},
+		{"EM_H8_300", Const, 0, ""},
+		{"EM_H8_300H", Const, 0, ""},
+		{"EM_H8_500", Const, 0, ""},
+		{"EM_HUANY", Const, 11, ""},
+		{"EM_IA_64", Const, 0, ""},
+		{"EM_INTEL205", Const, 11, ""},
+		{"EM_INTEL206", Const, 11, ""},
+		{"EM_INTEL207", Const, 11, ""},
+		{"EM_INTEL208", Const, 11, ""},
+		{"EM_INTEL209", Const, 11, ""},
+		{"EM_IP2K", Const, 11, ""},
+		{"EM_JAVELIN", Const, 11, ""},
+		{"EM_K10M", Const, 11, ""},
+		{"EM_KM32", Const, 11, ""},
+		{"EM_KMX16", Const, 11, ""},
+		{"EM_KMX32", Const, 11, ""},
+		{"EM_KMX8", Const, 11, ""},
+		{"EM_KVARC", Const, 11, ""},
+		{"EM_L10M", Const, 11, ""},
+		{"EM_LANAI", Const, 11, ""},
+		{"EM_LATTICEMICO32", Const, 11, ""},
+		{"EM_LOONGARCH", Const, 19, ""},
+		{"EM_M16C", Const, 11, ""},
+		{"EM_M32", Const, 0, ""},
+		{"EM_M32C", Const, 11, ""},
+		{"EM_M32R", Const, 11, ""},
+		{"EM_MANIK", Const, 11, ""},
+		{"EM_MAX", Const, 11, ""},
+		{"EM_MAXQ30", Const, 11, ""},
+		{"EM_MCHP_PIC", Const, 11, ""},
+		{"EM_MCST_ELBRUS", Const, 11, ""},
+		{"EM_ME16", Const, 0, ""},
+		{"EM_METAG", Const, 11, ""},
+		{"EM_MICROBLAZE", Const, 11, ""},
+		{"EM_MIPS", Const, 0, ""},
+		{"EM_MIPS_RS3_LE", Const, 0, ""},
+		{"EM_MIPS_RS4_BE", Const, 0, ""},
+		{"EM_MIPS_X", Const, 0, ""},
+		{"EM_MMA", Const, 0, ""},
+		{"EM_MMDSP_PLUS", Const, 11, ""},
+		{"EM_MMIX", Const, 11, ""},
+		{"EM_MN10200", Const, 11, ""},
+		{"EM_MN10300", Const, 11, ""},
+		{"EM_MOXIE", Const, 11, ""},
+		{"EM_MSP430", Const, 11, ""},
+		{"EM_NCPU", Const, 0, ""},
+		{"EM_NDR1", Const, 0, ""},
+		{"EM_NDS32", Const, 11, ""},
+		{"EM_NONE", Const, 0, ""},
+		{"EM_NORC", Const, 11, ""},
+		{"EM_NS32K", Const, 11, ""},
+		{"EM_OPEN8", Const, 11, ""},
+		{"EM_OPENRISC", Const, 11, ""},
+		{"EM_PARISC", Const, 0, ""},
+		{"EM_PCP", Const, 0, ""},
+		{"EM_PDP10", Const, 11, ""},
+		{"EM_PDP11", Const, 11, ""},
+		{"EM_PDSP", Const, 11, ""},
+		{"EM_PJ", Const, 11, ""},
+		{"EM_PPC", Const, 0, ""},
+		{"EM_PPC64", Const, 0, ""},
+		{"EM_PRISM", Const, 11, ""},
+		{"EM_QDSP6", Const, 11, ""},
+		{"EM_R32C", Const, 11, ""},
+		{"EM_RCE", Const, 0, ""},
+		{"EM_RH32", Const, 0, ""},
+		{"EM_RISCV", Const, 11, ""},
+		{"EM_RL78", Const, 11, ""},
+		{"EM_RS08", Const, 11, ""},
+		{"EM_RX", Const, 11, ""},
+		{"EM_S370", Const, 0, ""},
+		{"EM_S390", Const, 0, ""},
+		{"EM_SCORE7", Const, 11, ""},
+		{"EM_SEP", Const, 11, ""},
+		{"EM_SE_C17", Const, 11, ""},
+		{"EM_SE_C33", Const, 11, ""},
+		{"EM_SH", Const, 0, ""},
+		{"EM_SHARC", Const, 11, ""},
+		{"EM_SLE9X", Const, 11, ""},
+		{"EM_SNP1K", Const, 11, ""},
+		{"EM_SPARC", Const, 0, ""},
+		{"EM_SPARC32PLUS", Const, 0, ""},
+		{"EM_SPARCV9", Const, 0, ""},
+		{"EM_ST100", Const, 0, ""},
+		{"EM_ST19", Const, 11, ""},
+		{"EM_ST200", Const, 11, ""},
+		{"EM_ST7", Const, 11, ""},
+		{"EM_ST9PLUS", Const, 11, ""},
+		{"EM_STARCORE", Const, 0, ""},
+		{"EM_STM8", Const, 11, ""},
+		{"EM_STXP7X", Const, 11, ""},
+		{"EM_SVX", Const, 11, ""},
+		{"EM_TILE64", Const, 11, ""},
+		{"EM_TILEGX", Const, 11, ""},
+		{"EM_TILEPRO", Const, 11, ""},
+		{"EM_TINYJ", Const, 0, ""},
+		{"EM_TI_ARP32", Const, 11, ""},
+		{"EM_TI_C2000", Const, 11, ""},
+		{"EM_TI_C5500", Const, 11, ""},
+		{"EM_TI_C6000", Const, 11, ""},
+		{"EM_TI_PRU", Const, 11, ""},
+		{"EM_TMM_GPP", Const, 11, ""},
+		{"EM_TPC", Const, 11, ""},
+		{"EM_TRICORE", Const, 0, ""},
+		{"EM_TRIMEDIA", Const, 11, ""},
+		{"EM_TSK3000", Const, 11, ""},
+		{"EM_UNICORE", Const, 11, ""},
+		{"EM_V800", Const, 0, ""},
+		{"EM_V850", Const, 11, ""},
+		{"EM_VAX", Const, 11, ""},
+		{"EM_VIDEOCORE", Const, 11, ""},
+		{"EM_VIDEOCORE3", Const, 11, ""},
+		{"EM_VIDEOCORE5", Const, 11, ""},
+		{"EM_VISIUM", Const, 11, ""},
+		{"EM_VPP500", Const, 0, ""},
+		{"EM_X86_64", Const, 0, ""},
+		{"EM_XCORE", Const, 11, ""},
+		{"EM_XGATE", Const, 11, ""},
+		{"EM_XIMO16", Const, 11, ""},
+		{"EM_XTENSA", Const, 11, ""},
+		{"EM_Z80", Const, 11, ""},
+		{"EM_ZSP", Const, 11, ""},
+		{"ET_CORE", Const, 0, ""},
+		{"ET_DYN", Const, 0, ""},
+		{"ET_EXEC", Const, 0, ""},
+		{"ET_HIOS", Const, 0, ""},
+		{"ET_HIPROC", Const, 0, ""},
+		{"ET_LOOS", Const, 0, ""},
+		{"ET_LOPROC", Const, 0, ""},
+		{"ET_NONE", Const, 0, ""},
+		{"ET_REL", Const, 0, ""},
+		{"EV_CURRENT", Const, 0, ""},
+		{"EV_NONE", Const, 0, ""},
+		{"ErrNoSymbols", Var, 4, ""},
+		{"File", Type, 0, ""},
+		{"File.FileHeader", Field, 0, ""},
+		{"File.Progs", Field, 0, ""},
+		{"File.Sections", Field, 0, ""},
+		{"FileHeader", Type, 0, ""},
+		{"FileHeader.ABIVersion", Field, 0, ""},
+		{"FileHeader.ByteOrder", Field, 0, ""},
+		{"FileHeader.Class", Field, 0, ""},
+		{"FileHeader.Data", Field, 0, ""},
+		{"FileHeader.Entry", Field, 1, ""},
+		{"FileHeader.Machine", Field, 0, ""},
+		{"FileHeader.OSABI", Field, 0, ""},
+		{"FileHeader.Type", Field, 0, ""},
+		{"FileHeader.Version", Field, 0, ""},
+		{"FormatError", Type, 0, ""},
+		{"Header32", Type, 0, ""},
+		{"Header32.Ehsize", Field, 0, ""},
+		{"Header32.Entry", Field, 0, ""},
+		{"Header32.Flags", Field, 0, ""},
+		{"Header32.Ident", Field, 0, ""},
+		{"Header32.Machine", Field, 0, ""},
+		{"Header32.Phentsize", Field, 0, ""},
+		{"Header32.Phnum", Field, 0, ""},
+		{"Header32.Phoff", Field, 0, ""},
+		{"Header32.Shentsize", Field, 0, ""},
+		{"Header32.Shnum", Field, 0, ""},
+		{"Header32.Shoff", Field, 0, ""},
+		{"Header32.Shstrndx", Field, 0, ""},
+		{"Header32.Type", Field, 0, ""},
+		{"Header32.Version", Field, 0, ""},
+		{"Header64", Type, 0, ""},
+		{"Header64.Ehsize", Field, 0, ""},
+		{"Header64.Entry", Field, 0, ""},
+		{"Header64.Flags", Field, 0, ""},
+		{"Header64.Ident", Field, 0, ""},
+		{"Header64.Machine", Field, 0, ""},
+		{"Header64.Phentsize", Field, 0, ""},
+		{"Header64.Phnum", Field, 0, ""},
+		{"Header64.Phoff", Field, 0, ""},
+		{"Header64.Shentsize", Field, 0, ""},
+		{"Header64.Shnum", Field, 0, ""},
+		{"Header64.Shoff", Field, 0, ""},
+		{"Header64.Shstrndx", Field, 0, ""},
+		{"Header64.Type", Field, 0, ""},
+		{"Header64.Version", Field, 0, ""},
+		{"ImportedSymbol", Type, 0, ""},
+		{"ImportedSymbol.Library", Field, 0, ""},
+		{"ImportedSymbol.Name", Field, 0, ""},
+		{"ImportedSymbol.Version", Field, 0, ""},
+		{"Machine", Type, 0, ""},
+		{"NT_FPREGSET", Const, 0, ""},
+		{"NT_PRPSINFO", Const, 0, ""},
+		{"NT_PRSTATUS", Const, 0, ""},
+		{"NType", Type, 0, ""},
+		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+		{"OSABI", Type, 0, ""},
+		{"Open", Func, 0, "func(name string) (*File, error)"},
+		{"PF_MASKOS", Const, 0, ""},
+		{"PF_MASKPROC", Const, 0, ""},
+		{"PF_R", Const, 0, ""},
+		{"PF_W", Const, 0, ""},
+		{"PF_X", Const, 0, ""},
+		{"PT_AARCH64_ARCHEXT", Const, 16, ""},
+		{"PT_AARCH64_UNWIND", Const, 16, ""},
+		{"PT_ARM_ARCHEXT", Const, 16, ""},
+		{"PT_ARM_EXIDX", Const, 16, ""},
+		{"PT_DYNAMIC", Const, 0, ""},
+		{"PT_GNU_EH_FRAME", Const, 16, ""},
+		{"PT_GNU_MBIND_HI", Const, 16, ""},
+		{"PT_GNU_MBIND_LO", Const, 16, ""},
+		{"PT_GNU_PROPERTY", Const, 16, ""},
+		{"PT_GNU_RELRO", Const, 16, ""},
+		{"PT_GNU_STACK", Const, 16, ""},
+		{"PT_HIOS", Const, 0, ""},
+		{"PT_HIPROC", Const, 0, ""},
+		{"PT_INTERP", Const, 0, ""},
+		{"PT_LOAD", Const, 0, ""},
+		{"PT_LOOS", Const, 0, ""},
+		{"PT_LOPROC", Const, 0, ""},
+		{"PT_MIPS_ABIFLAGS", Const, 16, ""},
+		{"PT_MIPS_OPTIONS", Const, 16, ""},
+		{"PT_MIPS_REGINFO", Const, 16, ""},
+		{"PT_MIPS_RTPROC", Const, 16, ""},
+		{"PT_NOTE", Const, 0, ""},
+		{"PT_NULL", Const, 0, ""},
+		{"PT_OPENBSD_BOOTDATA", Const, 16, ""},
+		{"PT_OPENBSD_NOBTCFI", Const, 23, ""},
+		{"PT_OPENBSD_RANDOMIZE", Const, 16, ""},
+		{"PT_OPENBSD_WXNEEDED", Const, 16, ""},
+		{"PT_PAX_FLAGS", Const, 16, ""},
+		{"PT_PHDR", Const, 0, ""},
+		{"PT_RISCV_ATTRIBUTES", Const, 25, ""},
+		{"PT_S390_PGSTE", Const, 16, ""},
+		{"PT_SHLIB", Const, 0, ""},
+		{"PT_SUNWSTACK", Const, 16, ""},
+		{"PT_SUNW_EH_FRAME", Const, 16, ""},
+		{"PT_TLS", Const, 0, ""},
+		{"Prog", Type, 0, ""},
+		{"Prog.ProgHeader", Field, 0, ""},
+		{"Prog.ReaderAt", Field, 0, ""},
+		{"Prog32", Type, 0, ""},
+		{"Prog32.Align", Field, 0, ""},
+		{"Prog32.Filesz", Field, 0, ""},
+		{"Prog32.Flags", Field, 0, ""},
+		{"Prog32.Memsz", Field, 0, ""},
+		{"Prog32.Off", Field, 0, ""},
+		{"Prog32.Paddr", Field, 0, ""},
+		{"Prog32.Type", Field, 0, ""},
+		{"Prog32.Vaddr", Field, 0, ""},
+		{"Prog64", Type, 0, ""},
+		{"Prog64.Align", Field, 0, ""},
+		{"Prog64.Filesz", Field, 0, ""},
+		{"Prog64.Flags", Field, 0, ""},
+		{"Prog64.Memsz", Field, 0, ""},
+		{"Prog64.Off", Field, 0, ""},
+		{"Prog64.Paddr", Field, 0, ""},
+		{"Prog64.Type", Field, 0, ""},
+		{"Prog64.Vaddr", Field, 0, ""},
+		{"ProgFlag", Type, 0, ""},
+		{"ProgHeader", Type, 0, ""},
+		{"ProgHeader.Align", Field, 0, ""},
+		{"ProgHeader.Filesz", Field, 0, ""},
+		{"ProgHeader.Flags", Field, 0, ""},
+		{"ProgHeader.Memsz", Field, 0, ""},
+		{"ProgHeader.Off", Field, 0, ""},
+		{"ProgHeader.Paddr", Field, 0, ""},
+		{"ProgHeader.Type", Field, 0, ""},
+		{"ProgHeader.Vaddr", Field, 0, ""},
+		{"ProgType", Type, 0, ""},
+		{"R_386", Type, 0, ""},
+		{"R_386_16", Const, 10, ""},
+		{"R_386_32", Const, 0, ""},
+		{"R_386_32PLT", Const, 10, ""},
+		{"R_386_8", Const, 10, ""},
+		{"R_386_COPY", Const, 0, ""},
+		{"R_386_GLOB_DAT", Const, 0, ""},
+		{"R_386_GOT32", Const, 0, ""},
+		{"R_386_GOT32X", Const, 10, ""},
+		{"R_386_GOTOFF", Const, 0, ""},
+		{"R_386_GOTPC", Const, 0, ""},
+		{"R_386_IRELATIVE", Const, 10, ""},
+		{"R_386_JMP_SLOT", Const, 0, ""},
+		{"R_386_NONE", Const, 0, ""},
+		{"R_386_PC16", Const, 10, ""},
+		{"R_386_PC32", Const, 0, ""},
+		{"R_386_PC8", Const, 10, ""},
+		{"R_386_PLT32", Const, 0, ""},
+		{"R_386_RELATIVE", Const, 0, ""},
+		{"R_386_SIZE32", Const, 10, ""},
+		{"R_386_TLS_DESC", Const, 10, ""},
+		{"R_386_TLS_DESC_CALL", Const, 10, ""},
+		{"R_386_TLS_DTPMOD32", Const, 0, ""},
+		{"R_386_TLS_DTPOFF32", Const, 0, ""},
+		{"R_386_TLS_GD", Const, 0, ""},
+		{"R_386_TLS_GD_32", Const, 0, ""},
+		{"R_386_TLS_GD_CALL", Const, 0, ""},
+		{"R_386_TLS_GD_POP", Const, 0, ""},
+		{"R_386_TLS_GD_PUSH", Const, 0, ""},
+		{"R_386_TLS_GOTDESC", Const, 10, ""},
+		{"R_386_TLS_GOTIE", Const, 0, ""},
+		{"R_386_TLS_IE", Const, 0, ""},
+		{"R_386_TLS_IE_32", Const, 0, ""},
+		{"R_386_TLS_LDM", Const, 0, ""},
+		{"R_386_TLS_LDM_32", Const, 0, ""},
+		{"R_386_TLS_LDM_CALL", Const, 0, ""},
+		{"R_386_TLS_LDM_POP", Const, 0, ""},
+		{"R_386_TLS_LDM_PUSH", Const, 0, ""},
+		{"R_386_TLS_LDO_32", Const, 0, ""},
+		{"R_386_TLS_LE", Const, 0, ""},
+		{"R_386_TLS_LE_32", Const, 0, ""},
+		{"R_386_TLS_TPOFF", Const, 0, ""},
+		{"R_386_TLS_TPOFF32", Const, 0, ""},
+		{"R_390", Type, 7, ""},
+		{"R_390_12", Const, 7, ""},
+		{"R_390_16", Const, 7, ""},
+		{"R_390_20", Const, 7, ""},
+		{"R_390_32", Const, 7, ""},
+		{"R_390_64", Const, 7, ""},
+		{"R_390_8", Const, 7, ""},
+		{"R_390_COPY", Const, 7, ""},
+		{"R_390_GLOB_DAT", Const, 7, ""},
+		{"R_390_GOT12", Const, 7, ""},
+		{"R_390_GOT16", Const, 7, ""},
+		{"R_390_GOT20", Const, 7, ""},
+		{"R_390_GOT32", Const, 7, ""},
+		{"R_390_GOT64", Const, 7, ""},
+		{"R_390_GOTENT", Const, 7, ""},
+		{"R_390_GOTOFF", Const, 7, ""},
+		{"R_390_GOTOFF16", Const, 7, ""},
+		{"R_390_GOTOFF64", Const, 7, ""},
+		{"R_390_GOTPC", Const, 7, ""},
+		{"R_390_GOTPCDBL", Const, 7, ""},
+		{"R_390_GOTPLT12", Const, 7, ""},
+		{"R_390_GOTPLT16", Const, 7, ""},
+		{"R_390_GOTPLT20", Const, 7, ""},
+		{"R_390_GOTPLT32", Const, 7, ""},
+		{"R_390_GOTPLT64", Const, 7, ""},
+		{"R_390_GOTPLTENT", Const, 7, ""},
+		{"R_390_GOTPLTOFF16", Const, 7, ""},
+		{"R_390_GOTPLTOFF32", Const, 7, ""},
+		{"R_390_GOTPLTOFF64", Const, 7, ""},
+		{"R_390_JMP_SLOT", Const, 7, ""},
+		{"R_390_NONE", Const, 7, ""},
+		{"R_390_PC16", Const, 7, ""},
+		{"R_390_PC16DBL", Const, 7, ""},
+		{"R_390_PC32", Const, 7, ""},
+		{"R_390_PC32DBL", Const, 7, ""},
+		{"R_390_PC64", Const, 7, ""},
+		{"R_390_PLT16DBL", Const, 7, ""},
+		{"R_390_PLT32", Const, 7, ""},
+		{"R_390_PLT32DBL", Const, 7, ""},
+		{"R_390_PLT64", Const, 7, ""},
+		{"R_390_RELATIVE", Const, 7, ""},
+		{"R_390_TLS_DTPMOD", Const, 7, ""},
+		{"R_390_TLS_DTPOFF", Const, 7, ""},
+		{"R_390_TLS_GD32", Const, 7, ""},
+		{"R_390_TLS_GD64", Const, 7, ""},
+		{"R_390_TLS_GDCALL", Const, 7, ""},
+		{"R_390_TLS_GOTIE12", Const, 7, ""},
+		{"R_390_TLS_GOTIE20", Const, 7, ""},
+		{"R_390_TLS_GOTIE32", Const, 7, ""},
+		{"R_390_TLS_GOTIE64", Const, 7, ""},
+		{"R_390_TLS_IE32", Const, 7, ""},
+		{"R_390_TLS_IE64", Const, 7, ""},
+		{"R_390_TLS_IEENT", Const, 7, ""},
+		{"R_390_TLS_LDCALL", Const, 7, ""},
+		{"R_390_TLS_LDM32", Const, 7, ""},
+		{"R_390_TLS_LDM64", Const, 7, ""},
+		{"R_390_TLS_LDO32", Const, 7, ""},
+		{"R_390_TLS_LDO64", Const, 7, ""},
+		{"R_390_TLS_LE32", Const, 7, ""},
+		{"R_390_TLS_LE64", Const, 7, ""},
+		{"R_390_TLS_LOAD", Const, 7, ""},
+		{"R_390_TLS_TPOFF", Const, 7, ""},
+		{"R_AARCH64", Type, 4, ""},
+		{"R_AARCH64_ABS16", Const, 4, ""},
+		{"R_AARCH64_ABS32", Const, 4, ""},
+		{"R_AARCH64_ABS64", Const, 4, ""},
+		{"R_AARCH64_ADD_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_ADR_GOT_PAGE", Const, 4, ""},
+		{"R_AARCH64_ADR_PREL_LO21", Const, 4, ""},
+		{"R_AARCH64_ADR_PREL_PG_HI21", Const, 4, ""},
+		{"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4, ""},
+		{"R_AARCH64_CALL26", Const, 4, ""},
+		{"R_AARCH64_CONDBR19", Const, 4, ""},
+		{"R_AARCH64_COPY", Const, 4, ""},
+		{"R_AARCH64_GLOB_DAT", Const, 4, ""},
+		{"R_AARCH64_GOT_LD_PREL19", Const, 4, ""},
+		{"R_AARCH64_IRELATIVE", Const, 4, ""},
+		{"R_AARCH64_JUMP26", Const, 4, ""},
+		{"R_AARCH64_JUMP_SLOT", Const, 4, ""},
+		{"R_AARCH64_LD64_GOTOFF_LO15", Const, 10, ""},
+		{"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10, ""},
+		{"R_AARCH64_LD64_GOT_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_LD_PREL_LO19", Const, 4, ""},
+		{"R_AARCH64_MOVW_SABS_G0", Const, 4, ""},
+		{"R_AARCH64_MOVW_SABS_G1", Const, 4, ""},
+		{"R_AARCH64_MOVW_SABS_G2", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G0", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G0_NC", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G1", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G1_NC", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G2", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G2_NC", Const, 4, ""},
+		{"R_AARCH64_MOVW_UABS_G3", Const, 4, ""},
+		{"R_AARCH64_NONE", Const, 4, ""},
+		{"R_AARCH64_NULL", Const, 4, ""},
+		{"R_AARCH64_P32_ABS16", Const, 4, ""},
+		{"R_AARCH64_P32_ABS32", Const, 4, ""},
+		{"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4, ""},
+		{"R_AARCH64_P32_ADR_PREL_LO21", Const, 4, ""},
+		{"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4, ""},
+		{"R_AARCH64_P32_CALL26", Const, 4, ""},
+		{"R_AARCH64_P32_CONDBR19", Const, 4, ""},
+		{"R_AARCH64_P32_COPY", Const, 4, ""},
+		{"R_AARCH64_P32_GLOB_DAT", Const, 4, ""},
+		{"R_AARCH64_P32_GOT_LD_PREL19", Const, 4, ""},
+		{"R_AARCH64_P32_IRELATIVE", Const, 4, ""},
+		{"R_AARCH64_P32_JUMP26", Const, 4, ""},
+		{"R_AARCH64_P32_JUMP_SLOT", Const, 4, ""},
+		{"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_LD_PREL_LO19", Const, 4, ""},
+		{"R_AARCH64_P32_MOVW_SABS_G0", Const, 4, ""},
+		{"R_AARCH64_P32_MOVW_UABS_G0", Const, 4, ""},
+		{"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4, ""},
+		{"R_AARCH64_P32_MOVW_UABS_G1", Const, 4, ""},
+		{"R_AARCH64_P32_PREL16", Const, 4, ""},
+		{"R_AARCH64_P32_PREL32", Const, 4, ""},
+		{"R_AARCH64_P32_RELATIVE", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC_CALL", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4, ""},
+		{"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4, ""},
+		{"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
+		{"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
+		{"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
+		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
+		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
+		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
+		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
+		{"R_AARCH64_P32_TLS_DTPMOD", Const, 4, ""},
+		{"R_AARCH64_P32_TLS_DTPREL", Const, 4, ""},
+		{"R_AARCH64_P32_TLS_TPREL", Const, 4, ""},
+		{"R_AARCH64_P32_TSTBR14", Const, 4, ""},
+		{"R_AARCH64_PREL16", Const, 4, ""},
+		{"R_AARCH64_PREL32", Const, 4, ""},
+		{"R_AARCH64_PREL64", Const, 4, ""},
+		{"R_AARCH64_RELATIVE", Const, 4, ""},
+		{"R_AARCH64_TLSDESC", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_ADD", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_CALL", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_LDR", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_LD_PREL19", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4, ""},
+		{"R_AARCH64_TLSDESC_OFF_G1", Const, 4, ""},
+		{"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4, ""},
+		{"R_AARCH64_TLSGD_ADR_PREL21", Const, 10, ""},
+		{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10, ""},
+		{"R_AARCH64_TLSGD_MOVW_G1", Const, 10, ""},
+		{"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
+		{"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
+		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4, ""},
+		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4, ""},
+		{"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10, ""},
+		{"R_AARCH64_TLSLD_ADR_PREL21", Const, 10, ""},
+		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10, ""},
+		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10, ""},
+		{"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
+		{"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
+		{"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
+		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10, ""},
+		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10, ""},
+		{"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
+		{"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
+		{"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
+		{"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4, ""},
+		{"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4, ""},
+		{"R_AARCH64_TLS_DTPMOD64", Const, 4, ""},
+		{"R_AARCH64_TLS_DTPREL64", Const, 4, ""},
+		{"R_AARCH64_TLS_TPREL64", Const, 4, ""},
+		{"R_AARCH64_TSTBR14", Const, 4, ""},
+		{"R_ALPHA", Type, 0, ""},
+		{"R_ALPHA_BRADDR", Const, 0, ""},
+		{"R_ALPHA_COPY", Const, 0, ""},
+		{"R_ALPHA_GLOB_DAT", Const, 0, ""},
+		{"R_ALPHA_GPDISP", Const, 0, ""},
+		{"R_ALPHA_GPREL32", Const, 0, ""},
+		{"R_ALPHA_GPRELHIGH", Const, 0, ""},
+		{"R_ALPHA_GPRELLOW", Const, 0, ""},
+		{"R_ALPHA_GPVALUE", Const, 0, ""},
+		{"R_ALPHA_HINT", Const, 0, ""},
+		{"R_ALPHA_IMMED_BR_HI32", Const, 0, ""},
+		{"R_ALPHA_IMMED_GP_16", Const, 0, ""},
+		{"R_ALPHA_IMMED_GP_HI32", Const, 0, ""},
+		{"R_ALPHA_IMMED_LO32", Const, 0, ""},
+		{"R_ALPHA_IMMED_SCN_HI32", Const, 0, ""},
+		{"R_ALPHA_JMP_SLOT", Const, 0, ""},
+		{"R_ALPHA_LITERAL", Const, 0, ""},
+		{"R_ALPHA_LITUSE", Const, 0, ""},
+		{"R_ALPHA_NONE", Const, 0, ""},
+		{"R_ALPHA_OP_PRSHIFT", Const, 0, ""},
+		{"R_ALPHA_OP_PSUB", Const, 0, ""},
+		{"R_ALPHA_OP_PUSH", Const, 0, ""},
+		{"R_ALPHA_OP_STORE", Const, 0, ""},
+		{"R_ALPHA_REFLONG", Const, 0, ""},
+		{"R_ALPHA_REFQUAD", Const, 0, ""},
+		{"R_ALPHA_RELATIVE", Const, 0, ""},
+		{"R_ALPHA_SREL16", Const, 0, ""},
+		{"R_ALPHA_SREL32", Const, 0, ""},
+		{"R_ALPHA_SREL64", Const, 0, ""},
+		{"R_ARM", Type, 0, ""},
+		{"R_ARM_ABS12", Const, 0, ""},
+		{"R_ARM_ABS16", Const, 0, ""},
+		{"R_ARM_ABS32", Const, 0, ""},
+		{"R_ARM_ABS32_NOI", Const, 10, ""},
+		{"R_ARM_ABS8", Const, 0, ""},
+		{"R_ARM_ALU_PCREL_15_8", Const, 10, ""},
+		{"R_ARM_ALU_PCREL_23_15", Const, 10, ""},
+		{"R_ARM_ALU_PCREL_7_0", Const, 10, ""},
+		{"R_ARM_ALU_PC_G0", Const, 10, ""},
+		{"R_ARM_ALU_PC_G0_NC", Const, 10, ""},
+		{"R_ARM_ALU_PC_G1", Const, 10, ""},
+		{"R_ARM_ALU_PC_G1_NC", Const, 10, ""},
+		{"R_ARM_ALU_PC_G2", Const, 10, ""},
+		{"R_ARM_ALU_SBREL_19_12_NC", Const, 10, ""},
+		{"R_ARM_ALU_SBREL_27_20_CK", Const, 10, ""},
+		{"R_ARM_ALU_SB_G0", Const, 10, ""},
+		{"R_ARM_ALU_SB_G0_NC", Const, 10, ""},
+		{"R_ARM_ALU_SB_G1", Const, 10, ""},
+		{"R_ARM_ALU_SB_G1_NC", Const, 10, ""},
+		{"R_ARM_ALU_SB_G2", Const, 10, ""},
+		{"R_ARM_AMP_VCALL9", Const, 0, ""},
+		{"R_ARM_BASE_ABS", Const, 10, ""},
+		{"R_ARM_CALL", Const, 10, ""},
+		{"R_ARM_COPY", Const, 0, ""},
+		{"R_ARM_GLOB_DAT", Const, 0, ""},
+		{"R_ARM_GNU_VTENTRY", Const, 0, ""},
+		{"R_ARM_GNU_VTINHERIT", Const, 0, ""},
+		{"R_ARM_GOT32", Const, 0, ""},
+		{"R_ARM_GOTOFF", Const, 0, ""},
+		{"R_ARM_GOTOFF12", Const, 10, ""},
+		{"R_ARM_GOTPC", Const, 0, ""},
+		{"R_ARM_GOTRELAX", Const, 10, ""},
+		{"R_ARM_GOT_ABS", Const, 10, ""},
+		{"R_ARM_GOT_BREL12", Const, 10, ""},
+		{"R_ARM_GOT_PREL", Const, 10, ""},
+		{"R_ARM_IRELATIVE", Const, 10, ""},
+		{"R_ARM_JUMP24", Const, 10, ""},
+		{"R_ARM_JUMP_SLOT", Const, 0, ""},
+		{"R_ARM_LDC_PC_G0", Const, 10, ""},
+		{"R_ARM_LDC_PC_G1", Const, 10, ""},
+		{"R_ARM_LDC_PC_G2", Const, 10, ""},
+		{"R_ARM_LDC_SB_G0", Const, 10, ""},
+		{"R_ARM_LDC_SB_G1", Const, 10, ""},
+		{"R_ARM_LDC_SB_G2", Const, 10, ""},
+		{"R_ARM_LDRS_PC_G0", Const, 10, ""},
+		{"R_ARM_LDRS_PC_G1", Const, 10, ""},
+		{"R_ARM_LDRS_PC_G2", Const, 10, ""},
+		{"R_ARM_LDRS_SB_G0", Const, 10, ""},
+		{"R_ARM_LDRS_SB_G1", Const, 10, ""},
+		{"R_ARM_LDRS_SB_G2", Const, 10, ""},
+		{"R_ARM_LDR_PC_G1", Const, 10, ""},
+		{"R_ARM_LDR_PC_G2", Const, 10, ""},
+		{"R_ARM_LDR_SBREL_11_10_NC", Const, 10, ""},
+		{"R_ARM_LDR_SB_G0", Const, 10, ""},
+		{"R_ARM_LDR_SB_G1", Const, 10, ""},
+		{"R_ARM_LDR_SB_G2", Const, 10, ""},
+		{"R_ARM_ME_TOO", Const, 10, ""},
+		{"R_ARM_MOVT_ABS", Const, 10, ""},
+		{"R_ARM_MOVT_BREL", Const, 10, ""},
+		{"R_ARM_MOVT_PREL", Const, 10, ""},
+		{"R_ARM_MOVW_ABS_NC", Const, 10, ""},
+		{"R_ARM_MOVW_BREL", Const, 10, ""},
+		{"R_ARM_MOVW_BREL_NC", Const, 10, ""},
+		{"R_ARM_MOVW_PREL_NC", Const, 10, ""},
+		{"R_ARM_NONE", Const, 0, ""},
+		{"R_ARM_PC13", Const, 0, ""},
+		{"R_ARM_PC24", Const, 0, ""},
+		{"R_ARM_PLT32", Const, 0, ""},
+		{"R_ARM_PLT32_ABS", Const, 10, ""},
+		{"R_ARM_PREL31", Const, 10, ""},
+		{"R_ARM_PRIVATE_0", Const, 10, ""},
+		{"R_ARM_PRIVATE_1", Const, 10, ""},
+		{"R_ARM_PRIVATE_10", Const, 10, ""},
+		{"R_ARM_PRIVATE_11", Const, 10, ""},
+		{"R_ARM_PRIVATE_12", Const, 10, ""},
+		{"R_ARM_PRIVATE_13", Const, 10, ""},
+		{"R_ARM_PRIVATE_14", Const, 10, ""},
+		{"R_ARM_PRIVATE_15", Const, 10, ""},
+		{"R_ARM_PRIVATE_2", Const, 10, ""},
+		{"R_ARM_PRIVATE_3", Const, 10, ""},
+		{"R_ARM_PRIVATE_4", Const, 10, ""},
+		{"R_ARM_PRIVATE_5", Const, 10, ""},
+		{"R_ARM_PRIVATE_6", Const, 10, ""},
+		{"R_ARM_PRIVATE_7", Const, 10, ""},
+		{"R_ARM_PRIVATE_8", Const, 10, ""},
+		{"R_ARM_PRIVATE_9", Const, 10, ""},
+		{"R_ARM_RABS32", Const, 0, ""},
+		{"R_ARM_RBASE", Const, 0, ""},
+		{"R_ARM_REL32", Const, 0, ""},
+		{"R_ARM_REL32_NOI", Const, 10, ""},
+		{"R_ARM_RELATIVE", Const, 0, ""},
+		{"R_ARM_RPC24", Const, 0, ""},
+		{"R_ARM_RREL32", Const, 0, ""},
+		{"R_ARM_RSBREL32", Const, 0, ""},
+		{"R_ARM_RXPC25", Const, 10, ""},
+		{"R_ARM_SBREL31", Const, 10, ""},
+		{"R_ARM_SBREL32", Const, 0, ""},
+		{"R_ARM_SWI24", Const, 0, ""},
+		{"R_ARM_TARGET1", Const, 10, ""},
+		{"R_ARM_TARGET2", Const, 10, ""},
+		{"R_ARM_THM_ABS5", Const, 0, ""},
+		{"R_ARM_THM_ALU_ABS_G0_NC", Const, 10, ""},
+		{"R_ARM_THM_ALU_ABS_G1_NC", Const, 10, ""},
+		{"R_ARM_THM_ALU_ABS_G2_NC", Const, 10, ""},
+		{"R_ARM_THM_ALU_ABS_G3", Const, 10, ""},
+		{"R_ARM_THM_ALU_PREL_11_0", Const, 10, ""},
+		{"R_ARM_THM_GOT_BREL12", Const, 10, ""},
+		{"R_ARM_THM_JUMP11", Const, 10, ""},
+		{"R_ARM_THM_JUMP19", Const, 10, ""},
+		{"R_ARM_THM_JUMP24", Const, 10, ""},
+		{"R_ARM_THM_JUMP6", Const, 10, ""},
+		{"R_ARM_THM_JUMP8", Const, 10, ""},
+		{"R_ARM_THM_MOVT_ABS", Const, 10, ""},
+		{"R_ARM_THM_MOVT_BREL", Const, 10, ""},
+		{"R_ARM_THM_MOVT_PREL", Const, 10, ""},
+		{"R_ARM_THM_MOVW_ABS_NC", Const, 10, ""},
+		{"R_ARM_THM_MOVW_BREL", Const, 10, ""},
+		{"R_ARM_THM_MOVW_BREL_NC", Const, 10, ""},
+		{"R_ARM_THM_MOVW_PREL_NC", Const, 10, ""},
+		{"R_ARM_THM_PC12", Const, 10, ""},
+		{"R_ARM_THM_PC22", Const, 0, ""},
+		{"R_ARM_THM_PC8", Const, 0, ""},
+		{"R_ARM_THM_RPC22", Const, 0, ""},
+		{"R_ARM_THM_SWI8", Const, 0, ""},
+		{"R_ARM_THM_TLS_CALL", Const, 10, ""},
+		{"R_ARM_THM_TLS_DESCSEQ16", Const, 10, ""},
+		{"R_ARM_THM_TLS_DESCSEQ32", Const, 10, ""},
+		{"R_ARM_THM_XPC22", Const, 0, ""},
+		{"R_ARM_TLS_CALL", Const, 10, ""},
+		{"R_ARM_TLS_DESCSEQ", Const, 10, ""},
+		{"R_ARM_TLS_DTPMOD32", Const, 10, ""},
+		{"R_ARM_TLS_DTPOFF32", Const, 10, ""},
+		{"R_ARM_TLS_GD32", Const, 10, ""},
+		{"R_ARM_TLS_GOTDESC", Const, 10, ""},
+		{"R_ARM_TLS_IE12GP", Const, 10, ""},
+		{"R_ARM_TLS_IE32", Const, 10, ""},
+		{"R_ARM_TLS_LDM32", Const, 10, ""},
+		{"R_ARM_TLS_LDO12", Const, 10, ""},
+		{"R_ARM_TLS_LDO32", Const, 10, ""},
+		{"R_ARM_TLS_LE12", Const, 10, ""},
+		{"R_ARM_TLS_LE32", Const, 10, ""},
+		{"R_ARM_TLS_TPOFF32", Const, 10, ""},
+		{"R_ARM_V4BX", Const, 10, ""},
+		{"R_ARM_XPC25", Const, 0, ""},
+		{"R_INFO", Func, 0, "func(sym uint32, typ uint32) uint64"},
+		{"R_INFO32", Func, 0, "func(sym uint32, typ uint32) uint32"},
+		{"R_LARCH", Type, 19, ""},
+		{"R_LARCH_32", Const, 19, ""},
+		{"R_LARCH_32_PCREL", Const, 20, ""},
+		{"R_LARCH_64", Const, 19, ""},
+		{"R_LARCH_64_PCREL", Const, 22, ""},
+		{"R_LARCH_ABS64_HI12", Const, 20, ""},
+		{"R_LARCH_ABS64_LO20", Const, 20, ""},
+		{"R_LARCH_ABS_HI20", Const, 20, ""},
+		{"R_LARCH_ABS_LO12", Const, 20, ""},
+		{"R_LARCH_ADD16", Const, 19, ""},
+		{"R_LARCH_ADD24", Const, 19, ""},
+		{"R_LARCH_ADD32", Const, 19, ""},
+		{"R_LARCH_ADD6", Const, 22, ""},
+		{"R_LARCH_ADD64", Const, 19, ""},
+		{"R_LARCH_ADD8", Const, 19, ""},
+		{"R_LARCH_ADD_ULEB128", Const, 22, ""},
+		{"R_LARCH_ALIGN", Const, 22, ""},
+		{"R_LARCH_B16", Const, 20, ""},
+		{"R_LARCH_B21", Const, 20, ""},
+		{"R_LARCH_B26", Const, 20, ""},
+		{"R_LARCH_CFA", Const, 22, ""},
+		{"R_LARCH_COPY", Const, 19, ""},
+		{"R_LARCH_DELETE", Const, 22, ""},
+		{"R_LARCH_GNU_VTENTRY", Const, 20, ""},
+		{"R_LARCH_GNU_VTINHERIT", Const, 20, ""},
+		{"R_LARCH_GOT64_HI12", Const, 20, ""},
+		{"R_LARCH_GOT64_LO20", Const, 20, ""},
+		{"R_LARCH_GOT64_PC_HI12", Const, 20, ""},
+		{"R_LARCH_GOT64_PC_LO20", Const, 20, ""},
+		{"R_LARCH_GOT_HI20", Const, 20, ""},
+		{"R_LARCH_GOT_LO12", Const, 20, ""},
+		{"R_LARCH_GOT_PC_HI20", Const, 20, ""},
+		{"R_LARCH_GOT_PC_LO12", Const, 20, ""},
+		{"R_LARCH_IRELATIVE", Const, 19, ""},
+		{"R_LARCH_JUMP_SLOT", Const, 19, ""},
+		{"R_LARCH_MARK_LA", Const, 19, ""},
+		{"R_LARCH_MARK_PCREL", Const, 19, ""},
+		{"R_LARCH_NONE", Const, 19, ""},
+		{"R_LARCH_PCALA64_HI12", Const, 20, ""},
+		{"R_LARCH_PCALA64_LO20", Const, 20, ""},
+		{"R_LARCH_PCALA_HI20", Const, 20, ""},
+		{"R_LARCH_PCALA_LO12", Const, 20, ""},
+		{"R_LARCH_PCREL20_S2", Const, 22, ""},
+		{"R_LARCH_RELATIVE", Const, 19, ""},
+		{"R_LARCH_RELAX", Const, 20, ""},
+		{"R_LARCH_SOP_ADD", Const, 19, ""},
+		{"R_LARCH_SOP_AND", Const, 19, ""},
+		{"R_LARCH_SOP_ASSERT", Const, 19, ""},
+		{"R_LARCH_SOP_IF_ELSE", Const, 19, ""},
+		{"R_LARCH_SOP_NOT", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_10_12", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_10_16", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_10_5", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_S_5_20", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_U", Const, 19, ""},
+		{"R_LARCH_SOP_POP_32_U_10_12", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_DUP", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_GPREL", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_PCREL", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_TLS_GD", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19, ""},
+		{"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19, ""},
+		{"R_LARCH_SOP_SL", Const, 19, ""},
+		{"R_LARCH_SOP_SR", Const, 19, ""},
+		{"R_LARCH_SOP_SUB", Const, 19, ""},
+		{"R_LARCH_SUB16", Const, 19, ""},
+		{"R_LARCH_SUB24", Const, 19, ""},
+		{"R_LARCH_SUB32", Const, 19, ""},
+		{"R_LARCH_SUB6", Const, 22, ""},
+		{"R_LARCH_SUB64", Const, 19, ""},
+		{"R_LARCH_SUB8", Const, 19, ""},
+		{"R_LARCH_SUB_ULEB128", Const, 22, ""},
+		{"R_LARCH_TLS_DTPMOD32", Const, 19, ""},
+		{"R_LARCH_TLS_DTPMOD64", Const, 19, ""},
+		{"R_LARCH_TLS_DTPREL32", Const, 19, ""},
+		{"R_LARCH_TLS_DTPREL64", Const, 19, ""},
+		{"R_LARCH_TLS_GD_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_IE64_HI12", Const, 20, ""},
+		{"R_LARCH_TLS_IE64_LO20", Const, 20, ""},
+		{"R_LARCH_TLS_IE64_PC_HI12", Const, 20, ""},
+		{"R_LARCH_TLS_IE64_PC_LO20", Const, 20, ""},
+		{"R_LARCH_TLS_IE_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_IE_LO12", Const, 20, ""},
+		{"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""},
+		{"R_LARCH_TLS_LD_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_LE64_HI12", Const, 20, ""},
+		{"R_LARCH_TLS_LE64_LO20", Const, 20, ""},
+		{"R_LARCH_TLS_LE_HI20", Const, 20, ""},
+		{"R_LARCH_TLS_LE_LO12", Const, 20, ""},
+		{"R_LARCH_TLS_TPREL32", Const, 19, ""},
+		{"R_LARCH_TLS_TPREL64", Const, 19, ""},
+		{"R_MIPS", Type, 6, ""},
+		{"R_MIPS_16", Const, 6, ""},
+		{"R_MIPS_26", Const, 6, ""},
+		{"R_MIPS_32", Const, 6, ""},
+		{"R_MIPS_64", Const, 6, ""},
+		{"R_MIPS_ADD_IMMEDIATE", Const, 6, ""},
+		{"R_MIPS_CALL16", Const, 6, ""},
+		{"R_MIPS_CALL_HI16", Const, 6, ""},
+		{"R_MIPS_CALL_LO16", Const, 6, ""},
+		{"R_MIPS_DELETE", Const, 6, ""},
+		{"R_MIPS_GOT16", Const, 6, ""},
+		{"R_MIPS_GOT_DISP", Const, 6, ""},
+		{"R_MIPS_GOT_HI16", Const, 6, ""},
+		{"R_MIPS_GOT_LO16", Const, 6, ""},
+		{"R_MIPS_GOT_OFST", Const, 6, ""},
+		{"R_MIPS_GOT_PAGE", Const, 6, ""},
+		{"R_MIPS_GPREL16", Const, 6, ""},
+		{"R_MIPS_GPREL32", Const, 6, ""},
+		{"R_MIPS_HI16", Const, 6, ""},
+		{"R_MIPS_HIGHER", Const, 6, ""},
+		{"R_MIPS_HIGHEST", Const, 6, ""},
+		{"R_MIPS_INSERT_A", Const, 6, ""},
+		{"R_MIPS_INSERT_B", Const, 6, ""},
+		{"R_MIPS_JALR", Const, 6, ""},
+		{"R_MIPS_LITERAL", Const, 6, ""},
+		{"R_MIPS_LO16", Const, 6, ""},
+		{"R_MIPS_NONE", Const, 6, ""},
+		{"R_MIPS_PC16", Const, 6, ""},
+		{"R_MIPS_PC32", Const, 22, ""},
+		{"R_MIPS_PJUMP", Const, 6, ""},
+		{"R_MIPS_REL16", Const, 6, ""},
+		{"R_MIPS_REL32", Const, 6, ""},
+		{"R_MIPS_RELGOT", Const, 6, ""},
+		{"R_MIPS_SCN_DISP", Const, 6, ""},
+		{"R_MIPS_SHIFT5", Const, 6, ""},
+		{"R_MIPS_SHIFT6", Const, 6, ""},
+		{"R_MIPS_SUB", Const, 6, ""},
+		{"R_MIPS_TLS_DTPMOD32", Const, 6, ""},
+		{"R_MIPS_TLS_DTPMOD64", Const, 6, ""},
+		{"R_MIPS_TLS_DTPREL32", Const, 6, ""},
+		{"R_MIPS_TLS_DTPREL64", Const, 6, ""},
+		{"R_MIPS_TLS_DTPREL_HI16", Const, 6, ""},
+		{"R_MIPS_TLS_DTPREL_LO16", Const, 6, ""},
+		{"R_MIPS_TLS_GD", Const, 6, ""},
+		{"R_MIPS_TLS_GOTTPREL", Const, 6, ""},
+		{"R_MIPS_TLS_LDM", Const, 6, ""},
+		{"R_MIPS_TLS_TPREL32", Const, 6, ""},
+		{"R_MIPS_TLS_TPREL64", Const, 6, ""},
+		{"R_MIPS_TLS_TPREL_HI16", Const, 6, ""},
+		{"R_MIPS_TLS_TPREL_LO16", Const, 6, ""},
+		{"R_PPC", Type, 0, ""},
+		{"R_PPC64", Type, 5, ""},
+		{"R_PPC64_ADDR14", Const, 5, ""},
+		{"R_PPC64_ADDR14_BRNTAKEN", Const, 5, ""},
+		{"R_PPC64_ADDR14_BRTAKEN", Const, 5, ""},
+		{"R_PPC64_ADDR16", Const, 5, ""},
+		{"R_PPC64_ADDR16_DS", Const, 5, ""},
+		{"R_PPC64_ADDR16_HA", Const, 5, ""},
+		{"R_PPC64_ADDR16_HI", Const, 5, ""},
+		{"R_PPC64_ADDR16_HIGH", Const, 10, ""},
+		{"R_PPC64_ADDR16_HIGHA", Const, 10, ""},
+		{"R_PPC64_ADDR16_HIGHER", Const, 5, ""},
+		{"R_PPC64_ADDR16_HIGHER34", Const, 20, ""},
+		{"R_PPC64_ADDR16_HIGHERA", Const, 5, ""},
+		{"R_PPC64_ADDR16_HIGHERA34", Const, 20, ""},
+		{"R_PPC64_ADDR16_HIGHEST", Const, 5, ""},
+		{"R_PPC64_ADDR16_HIGHEST34", Const, 20, ""},
+		{"R_PPC64_ADDR16_HIGHESTA", Const, 5, ""},
+		{"R_PPC64_ADDR16_HIGHESTA34", Const, 20, ""},
+		{"R_PPC64_ADDR16_LO", Const, 5, ""},
+		{"R_PPC64_ADDR16_LO_DS", Const, 5, ""},
+		{"R_PPC64_ADDR24", Const, 5, ""},
+		{"R_PPC64_ADDR32", Const, 5, ""},
+		{"R_PPC64_ADDR64", Const, 5, ""},
+		{"R_PPC64_ADDR64_LOCAL", Const, 10, ""},
+		{"R_PPC64_COPY", Const, 20, ""},
+		{"R_PPC64_D28", Const, 20, ""},
+		{"R_PPC64_D34", Const, 20, ""},
+		{"R_PPC64_D34_HA30", Const, 20, ""},
+		{"R_PPC64_D34_HI30", Const, 20, ""},
+		{"R_PPC64_D34_LO", Const, 20, ""},
+		{"R_PPC64_DTPMOD64", Const, 5, ""},
+		{"R_PPC64_DTPREL16", Const, 5, ""},
+		{"R_PPC64_DTPREL16_DS", Const, 5, ""},
+		{"R_PPC64_DTPREL16_HA", Const, 5, ""},
+		{"R_PPC64_DTPREL16_HI", Const, 5, ""},
+		{"R_PPC64_DTPREL16_HIGH", Const, 10, ""},
+		{"R_PPC64_DTPREL16_HIGHA", Const, 10, ""},
+		{"R_PPC64_DTPREL16_HIGHER", Const, 5, ""},
+		{"R_PPC64_DTPREL16_HIGHERA", Const, 5, ""},
+		{"R_PPC64_DTPREL16_HIGHEST", Const, 5, ""},
+		{"R_PPC64_DTPREL16_HIGHESTA", Const, 5, ""},
+		{"R_PPC64_DTPREL16_LO", Const, 5, ""},
+		{"R_PPC64_DTPREL16_LO_DS", Const, 5, ""},
+		{"R_PPC64_DTPREL34", Const, 20, ""},
+		{"R_PPC64_DTPREL64", Const, 5, ""},
+		{"R_PPC64_ENTRY", Const, 10, ""},
+		{"R_PPC64_GLOB_DAT", Const, 20, ""},
+		{"R_PPC64_GNU_VTENTRY", Const, 20, ""},
+		{"R_PPC64_GNU_VTINHERIT", Const, 20, ""},
+		{"R_PPC64_GOT16", Const, 5, ""},
+		{"R_PPC64_GOT16_DS", Const, 5, ""},
+		{"R_PPC64_GOT16_HA", Const, 5, ""},
+		{"R_PPC64_GOT16_HI", Const, 5, ""},
+		{"R_PPC64_GOT16_LO", Const, 5, ""},
+		{"R_PPC64_GOT16_LO_DS", Const, 5, ""},
+		{"R_PPC64_GOT_DTPREL16_DS", Const, 5, ""},
+		{"R_PPC64_GOT_DTPREL16_HA", Const, 5, ""},
+		{"R_PPC64_GOT_DTPREL16_HI", Const, 5, ""},
+		{"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5, ""},
+		{"R_PPC64_GOT_DTPREL_PCREL34", Const, 20, ""},
+		{"R_PPC64_GOT_PCREL34", Const, 20, ""},
+		{"R_PPC64_GOT_TLSGD16", Const, 5, ""},
+		{"R_PPC64_GOT_TLSGD16_HA", Const, 5, ""},
+		{"R_PPC64_GOT_TLSGD16_HI", Const, 5, ""},
+		{"R_PPC64_GOT_TLSGD16_LO", Const, 5, ""},
+		{"R_PPC64_GOT_TLSGD_PCREL34", Const, 20, ""},
+		{"R_PPC64_GOT_TLSLD16", Const, 5, ""},
+		{"R_PPC64_GOT_TLSLD16_HA", Const, 5, ""},
+		{"R_PPC64_GOT_TLSLD16_HI", Const, 5, ""},
+		{"R_PPC64_GOT_TLSLD16_LO", Const, 5, ""},
+		{"R_PPC64_GOT_TLSLD_PCREL34", Const, 20, ""},
+		{"R_PPC64_GOT_TPREL16_DS", Const, 5, ""},
+		{"R_PPC64_GOT_TPREL16_HA", Const, 5, ""},
+		{"R_PPC64_GOT_TPREL16_HI", Const, 5, ""},
+		{"R_PPC64_GOT_TPREL16_LO_DS", Const, 5, ""},
+		{"R_PPC64_GOT_TPREL_PCREL34", Const, 20, ""},
+		{"R_PPC64_IRELATIVE", Const, 10, ""},
+		{"R_PPC64_JMP_IREL", Const, 10, ""},
+		{"R_PPC64_JMP_SLOT", Const, 5, ""},
+		{"R_PPC64_NONE", Const, 5, ""},
+		{"R_PPC64_PCREL28", Const, 20, ""},
+		{"R_PPC64_PCREL34", Const, 20, ""},
+		{"R_PPC64_PCREL_OPT", Const, 20, ""},
+		{"R_PPC64_PLT16_HA", Const, 20, ""},
+		{"R_PPC64_PLT16_HI", Const, 20, ""},
+		{"R_PPC64_PLT16_LO", Const, 20, ""},
+		{"R_PPC64_PLT16_LO_DS", Const, 10, ""},
+		{"R_PPC64_PLT32", Const, 20, ""},
+		{"R_PPC64_PLT64", Const, 20, ""},
+		{"R_PPC64_PLTCALL", Const, 20, ""},
+		{"R_PPC64_PLTCALL_NOTOC", Const, 20, ""},
+		{"R_PPC64_PLTGOT16", Const, 10, ""},
+		{"R_PPC64_PLTGOT16_DS", Const, 10, ""},
+		{"R_PPC64_PLTGOT16_HA", Const, 10, ""},
+		{"R_PPC64_PLTGOT16_HI", Const, 10, ""},
+		{"R_PPC64_PLTGOT16_LO", Const, 10, ""},
+		{"R_PPC64_PLTGOT_LO_DS", Const, 10, ""},
+		{"R_PPC64_PLTREL32", Const, 20, ""},
+		{"R_PPC64_PLTREL64", Const, 20, ""},
+		{"R_PPC64_PLTSEQ", Const, 20, ""},
+		{"R_PPC64_PLTSEQ_NOTOC", Const, 20, ""},
+		{"R_PPC64_PLT_PCREL34", Const, 20, ""},
+		{"R_PPC64_PLT_PCREL34_NOTOC", Const, 20, ""},
+		{"R_PPC64_REL14", Const, 5, ""},
+		{"R_PPC64_REL14_BRNTAKEN", Const, 5, ""},
+		{"R_PPC64_REL14_BRTAKEN", Const, 5, ""},
+		{"R_PPC64_REL16", Const, 5, ""},
+		{"R_PPC64_REL16DX_HA", Const, 10, ""},
+		{"R_PPC64_REL16_HA", Const, 5, ""},
+		{"R_PPC64_REL16_HI", Const, 5, ""},
+		{"R_PPC64_REL16_HIGH", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHA", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHER", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHER34", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHERA", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHERA34", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHEST", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHEST34", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHESTA", Const, 20, ""},
+		{"R_PPC64_REL16_HIGHESTA34", Const, 20, ""},
+		{"R_PPC64_REL16_LO", Const, 5, ""},
+		{"R_PPC64_REL24", Const, 5, ""},
+		{"R_PPC64_REL24_NOTOC", Const, 10, ""},
+		{"R_PPC64_REL24_P9NOTOC", Const, 21, ""},
+		{"R_PPC64_REL30", Const, 20, ""},
+		{"R_PPC64_REL32", Const, 5, ""},
+		{"R_PPC64_REL64", Const, 5, ""},
+		{"R_PPC64_RELATIVE", Const, 18, ""},
+		{"R_PPC64_SECTOFF", Const, 20, ""},
+		{"R_PPC64_SECTOFF_DS", Const, 10, ""},
+		{"R_PPC64_SECTOFF_HA", Const, 20, ""},
+		{"R_PPC64_SECTOFF_HI", Const, 20, ""},
+		{"R_PPC64_SECTOFF_LO", Const, 20, ""},
+		{"R_PPC64_SECTOFF_LO_DS", Const, 10, ""},
+		{"R_PPC64_TLS", Const, 5, ""},
+		{"R_PPC64_TLSGD", Const, 5, ""},
+		{"R_PPC64_TLSLD", Const, 5, ""},
+		{"R_PPC64_TOC", Const, 5, ""},
+		{"R_PPC64_TOC16", Const, 5, ""},
+		{"R_PPC64_TOC16_DS", Const, 5, ""},
+		{"R_PPC64_TOC16_HA", Const, 5, ""},
+		{"R_PPC64_TOC16_HI", Const, 5, ""},
+		{"R_PPC64_TOC16_LO", Const, 5, ""},
+		{"R_PPC64_TOC16_LO_DS", Const, 5, ""},
+		{"R_PPC64_TOCSAVE", Const, 10, ""},
+		{"R_PPC64_TPREL16", Const, 5, ""},
+		{"R_PPC64_TPREL16_DS", Const, 5, ""},
+		{"R_PPC64_TPREL16_HA", Const, 5, ""},
+		{"R_PPC64_TPREL16_HI", Const, 5, ""},
+		{"R_PPC64_TPREL16_HIGH", Const, 10, ""},
+		{"R_PPC64_TPREL16_HIGHA", Const, 10, ""},
+		{"R_PPC64_TPREL16_HIGHER", Const, 5, ""},
+		{"R_PPC64_TPREL16_HIGHERA", Const, 5, ""},
+		{"R_PPC64_TPREL16_HIGHEST", Const, 5, ""},
+		{"R_PPC64_TPREL16_HIGHESTA", Const, 5, ""},
+		{"R_PPC64_TPREL16_LO", Const, 5, ""},
+		{"R_PPC64_TPREL16_LO_DS", Const, 5, ""},
+		{"R_PPC64_TPREL34", Const, 20, ""},
+		{"R_PPC64_TPREL64", Const, 5, ""},
+		{"R_PPC64_UADDR16", Const, 20, ""},
+		{"R_PPC64_UADDR32", Const, 20, ""},
+		{"R_PPC64_UADDR64", Const, 20, ""},
+		{"R_PPC_ADDR14", Const, 0, ""},
+		{"R_PPC_ADDR14_BRNTAKEN", Const, 0, ""},
+		{"R_PPC_ADDR14_BRTAKEN", Const, 0, ""},
+		{"R_PPC_ADDR16", Const, 0, ""},
+		{"R_PPC_ADDR16_HA", Const, 0, ""},
+		{"R_PPC_ADDR16_HI", Const, 0, ""},
+		{"R_PPC_ADDR16_LO", Const, 0, ""},
+		{"R_PPC_ADDR24", Const, 0, ""},
+		{"R_PPC_ADDR32", Const, 0, ""},
+		{"R_PPC_COPY", Const, 0, ""},
+		{"R_PPC_DTPMOD32", Const, 0, ""},
+		{"R_PPC_DTPREL16", Const, 0, ""},
+		{"R_PPC_DTPREL16_HA", Const, 0, ""},
+		{"R_PPC_DTPREL16_HI", Const, 0, ""},
+		{"R_PPC_DTPREL16_LO", Const, 0, ""},
+		{"R_PPC_DTPREL32", Const, 0, ""},
+		{"R_PPC_EMB_BIT_FLD", Const, 0, ""},
+		{"R_PPC_EMB_MRKREF", Const, 0, ""},
+		{"R_PPC_EMB_NADDR16", Const, 0, ""},
+		{"R_PPC_EMB_NADDR16_HA", Const, 0, ""},
+		{"R_PPC_EMB_NADDR16_HI", Const, 0, ""},
+		{"R_PPC_EMB_NADDR16_LO", Const, 0, ""},
+		{"R_PPC_EMB_NADDR32", Const, 0, ""},
+		{"R_PPC_EMB_RELSDA", Const, 0, ""},
+		{"R_PPC_EMB_RELSEC16", Const, 0, ""},
+		{"R_PPC_EMB_RELST_HA", Const, 0, ""},
+		{"R_PPC_EMB_RELST_HI", Const, 0, ""},
+		{"R_PPC_EMB_RELST_LO", Const, 0, ""},
+		{"R_PPC_EMB_SDA21", Const, 0, ""},
+		{"R_PPC_EMB_SDA2I16", Const, 0, ""},
+		{"R_PPC_EMB_SDA2REL", Const, 0, ""},
+		{"R_PPC_EMB_SDAI16", Const, 0, ""},
+		{"R_PPC_GLOB_DAT", Const, 0, ""},
+		{"R_PPC_GOT16", Const, 0, ""},
+		{"R_PPC_GOT16_HA", Const, 0, ""},
+		{"R_PPC_GOT16_HI", Const, 0, ""},
+		{"R_PPC_GOT16_LO", Const, 0, ""},
+		{"R_PPC_GOT_TLSGD16", Const, 0, ""},
+		{"R_PPC_GOT_TLSGD16_HA", Const, 0, ""},
+		{"R_PPC_GOT_TLSGD16_HI", Const, 0, ""},
+		{"R_PPC_GOT_TLSGD16_LO", Const, 0, ""},
+		{"R_PPC_GOT_TLSLD16", Const, 0, ""},
+		{"R_PPC_GOT_TLSLD16_HA", Const, 0, ""},
+		{"R_PPC_GOT_TLSLD16_HI", Const, 0, ""},
+		{"R_PPC_GOT_TLSLD16_LO", Const, 0, ""},
+		{"R_PPC_GOT_TPREL16", Const, 0, ""},
+		{"R_PPC_GOT_TPREL16_HA", Const, 0, ""},
+		{"R_PPC_GOT_TPREL16_HI", Const, 0, ""},
+		{"R_PPC_GOT_TPREL16_LO", Const, 0, ""},
+		{"R_PPC_JMP_SLOT", Const, 0, ""},
+		{"R_PPC_LOCAL24PC", Const, 0, ""},
+		{"R_PPC_NONE", Const, 0, ""},
+		{"R_PPC_PLT16_HA", Const, 0, ""},
+		{"R_PPC_PLT16_HI", Const, 0, ""},
+		{"R_PPC_PLT16_LO", Const, 0, ""},
+		{"R_PPC_PLT32", Const, 0, ""},
+		{"R_PPC_PLTREL24", Const, 0, ""},
+		{"R_PPC_PLTREL32", Const, 0, ""},
+		{"R_PPC_REL14", Const, 0, ""},
+		{"R_PPC_REL14_BRNTAKEN", Const, 0, ""},
+		{"R_PPC_REL14_BRTAKEN", Const, 0, ""},
+		{"R_PPC_REL24", Const, 0, ""},
+		{"R_PPC_REL32", Const, 0, ""},
+		{"R_PPC_RELATIVE", Const, 0, ""},
+		{"R_PPC_SDAREL16", Const, 0, ""},
+		{"R_PPC_SECTOFF", Const, 0, ""},
+		{"R_PPC_SECTOFF_HA", Const, 0, ""},
+		{"R_PPC_SECTOFF_HI", Const, 0, ""},
+		{"R_PPC_SECTOFF_LO", Const, 0, ""},
+		{"R_PPC_TLS", Const, 0, ""},
+		{"R_PPC_TPREL16", Const, 0, ""},
+		{"R_PPC_TPREL16_HA", Const, 0, ""},
+		{"R_PPC_TPREL16_HI", Const, 0, ""},
+		{"R_PPC_TPREL16_LO", Const, 0, ""},
+		{"R_PPC_TPREL32", Const, 0, ""},
+		{"R_PPC_UADDR16", Const, 0, ""},
+		{"R_PPC_UADDR32", Const, 0, ""},
+		{"R_RISCV", Type, 11, ""},
+		{"R_RISCV_32", Const, 11, ""},
+		{"R_RISCV_32_PCREL", Const, 12, ""},
+		{"R_RISCV_64", Const, 11, ""},
+		{"R_RISCV_ADD16", Const, 11, ""},
+		{"R_RISCV_ADD32", Const, 11, ""},
+		{"R_RISCV_ADD64", Const, 11, ""},
+		{"R_RISCV_ADD8", Const, 11, ""},
+		{"R_RISCV_ALIGN", Const, 11, ""},
+		{"R_RISCV_BRANCH", Const, 11, ""},
+		{"R_RISCV_CALL", Const, 11, ""},
+		{"R_RISCV_CALL_PLT", Const, 11, ""},
+		{"R_RISCV_COPY", Const, 11, ""},
+		{"R_RISCV_GNU_VTENTRY", Const, 11, ""},
+		{"R_RISCV_GNU_VTINHERIT", Const, 11, ""},
+		{"R_RISCV_GOT_HI20", Const, 11, ""},
+		{"R_RISCV_GPREL_I", Const, 11, ""},
+		{"R_RISCV_GPREL_S", Const, 11, ""},
+		{"R_RISCV_HI20", Const, 11, ""},
+		{"R_RISCV_JAL", Const, 11, ""},
+		{"R_RISCV_JUMP_SLOT", Const, 11, ""},
+		{"R_RISCV_LO12_I", Const, 11, ""},
+		{"R_RISCV_LO12_S", Const, 11, ""},
+		{"R_RISCV_NONE", Const, 11, ""},
+		{"R_RISCV_PCREL_HI20", Const, 11, ""},
+		{"R_RISCV_PCREL_LO12_I", Const, 11, ""},
+		{"R_RISCV_PCREL_LO12_S", Const, 11, ""},
+		{"R_RISCV_RELATIVE", Const, 11, ""},
+		{"R_RISCV_RELAX", Const, 11, ""},
+		{"R_RISCV_RVC_BRANCH", Const, 11, ""},
+		{"R_RISCV_RVC_JUMP", Const, 11, ""},
+		{"R_RISCV_RVC_LUI", Const, 11, ""},
+		{"R_RISCV_SET16", Const, 11, ""},
+		{"R_RISCV_SET32", Const, 11, ""},
+		{"R_RISCV_SET6", Const, 11, ""},
+		{"R_RISCV_SET8", Const, 11, ""},
+		{"R_RISCV_SUB16", Const, 11, ""},
+		{"R_RISCV_SUB32", Const, 11, ""},
+		{"R_RISCV_SUB6", Const, 11, ""},
+		{"R_RISCV_SUB64", Const, 11, ""},
+		{"R_RISCV_SUB8", Const, 11, ""},
+		{"R_RISCV_TLS_DTPMOD32", Const, 11, ""},
+		{"R_RISCV_TLS_DTPMOD64", Const, 11, ""},
+		{"R_RISCV_TLS_DTPREL32", Const, 11, ""},
+		{"R_RISCV_TLS_DTPREL64", Const, 11, ""},
+		{"R_RISCV_TLS_GD_HI20", Const, 11, ""},
+		{"R_RISCV_TLS_GOT_HI20", Const, 11, ""},
+		{"R_RISCV_TLS_TPREL32", Const, 11, ""},
+		{"R_RISCV_TLS_TPREL64", Const, 11, ""},
+		{"R_RISCV_TPREL_ADD", Const, 11, ""},
+		{"R_RISCV_TPREL_HI20", Const, 11, ""},
+		{"R_RISCV_TPREL_I", Const, 11, ""},
+		{"R_RISCV_TPREL_LO12_I", Const, 11, ""},
+		{"R_RISCV_TPREL_LO12_S", Const, 11, ""},
+		{"R_RISCV_TPREL_S", Const, 11, ""},
+		{"R_SPARC", Type, 0, ""},
+		{"R_SPARC_10", Const, 0, ""},
+		{"R_SPARC_11", Const, 0, ""},
+		{"R_SPARC_13", Const, 0, ""},
+		{"R_SPARC_16", Const, 0, ""},
+		{"R_SPARC_22", Const, 0, ""},
+		{"R_SPARC_32", Const, 0, ""},
+		{"R_SPARC_5", Const, 0, ""},
+		{"R_SPARC_6", Const, 0, ""},
+		{"R_SPARC_64", Const, 0, ""},
+		{"R_SPARC_7", Const, 0, ""},
+		{"R_SPARC_8", Const, 0, ""},
+		{"R_SPARC_COPY", Const, 0, ""},
+		{"R_SPARC_DISP16", Const, 0, ""},
+		{"R_SPARC_DISP32", Const, 0, ""},
+		{"R_SPARC_DISP64", Const, 0, ""},
+		{"R_SPARC_DISP8", Const, 0, ""},
+		{"R_SPARC_GLOB_DAT", Const, 0, ""},
+		{"R_SPARC_GLOB_JMP", Const, 0, ""},
+		{"R_SPARC_GOT10", Const, 0, ""},
+		{"R_SPARC_GOT13", Const, 0, ""},
+		{"R_SPARC_GOT22", Const, 0, ""},
+		{"R_SPARC_H44", Const, 0, ""},
+		{"R_SPARC_HH22", Const, 0, ""},
+		{"R_SPARC_HI22", Const, 0, ""},
+		{"R_SPARC_HIPLT22", Const, 0, ""},
+		{"R_SPARC_HIX22", Const, 0, ""},
+		{"R_SPARC_HM10", Const, 0, ""},
+		{"R_SPARC_JMP_SLOT", Const, 0, ""},
+		{"R_SPARC_L44", Const, 0, ""},
+		{"R_SPARC_LM22", Const, 0, ""},
+		{"R_SPARC_LO10", Const, 0, ""},
+		{"R_SPARC_LOPLT10", Const, 0, ""},
+		{"R_SPARC_LOX10", Const, 0, ""},
+		{"R_SPARC_M44", Const, 0, ""},
+		{"R_SPARC_NONE", Const, 0, ""},
+		{"R_SPARC_OLO10", Const, 0, ""},
+		{"R_SPARC_PC10", Const, 0, ""},
+		{"R_SPARC_PC22", Const, 0, ""},
+		{"R_SPARC_PCPLT10", Const, 0, ""},
+		{"R_SPARC_PCPLT22", Const, 0, ""},
+		{"R_SPARC_PCPLT32", Const, 0, ""},
+		{"R_SPARC_PC_HH22", Const, 0, ""},
+		{"R_SPARC_PC_HM10", Const, 0, ""},
+		{"R_SPARC_PC_LM22", Const, 0, ""},
+		{"R_SPARC_PLT32", Const, 0, ""},
+		{"R_SPARC_PLT64", Const, 0, ""},
+		{"R_SPARC_REGISTER", Const, 0, ""},
+		{"R_SPARC_RELATIVE", Const, 0, ""},
+		{"R_SPARC_UA16", Const, 0, ""},
+		{"R_SPARC_UA32", Const, 0, ""},
+		{"R_SPARC_UA64", Const, 0, ""},
+		{"R_SPARC_WDISP16", Const, 0, ""},
+		{"R_SPARC_WDISP19", Const, 0, ""},
+		{"R_SPARC_WDISP22", Const, 0, ""},
+		{"R_SPARC_WDISP30", Const, 0, ""},
+		{"R_SPARC_WPLT30", Const, 0, ""},
+		{"R_SYM32", Func, 0, "func(info uint32) uint32"},
+		{"R_SYM64", Func, 0, "func(info uint64) uint32"},
+		{"R_TYPE32", Func, 0, "func(info uint32) uint32"},
+		{"R_TYPE64", Func, 0, "func(info uint64) uint32"},
+		{"R_X86_64", Type, 0, ""},
+		{"R_X86_64_16", Const, 0, ""},
+		{"R_X86_64_32", Const, 0, ""},
+		{"R_X86_64_32S", Const, 0, ""},
+		{"R_X86_64_64", Const, 0, ""},
+		{"R_X86_64_8", Const, 0, ""},
+		{"R_X86_64_COPY", Const, 0, ""},
+		{"R_X86_64_DTPMOD64", Const, 0, ""},
+		{"R_X86_64_DTPOFF32", Const, 0, ""},
+		{"R_X86_64_DTPOFF64", Const, 0, ""},
+		{"R_X86_64_GLOB_DAT", Const, 0, ""},
+		{"R_X86_64_GOT32", Const, 0, ""},
+		{"R_X86_64_GOT64", Const, 10, ""},
+		{"R_X86_64_GOTOFF64", Const, 10, ""},
+		{"R_X86_64_GOTPC32", Const, 10, ""},
+		{"R_X86_64_GOTPC32_TLSDESC", Const, 10, ""},
+		{"R_X86_64_GOTPC64", Const, 10, ""},
+		{"R_X86_64_GOTPCREL", Const, 0, ""},
+		{"R_X86_64_GOTPCREL64", Const, 10, ""},
+		{"R_X86_64_GOTPCRELX", Const, 10, ""},
+		{"R_X86_64_GOTPLT64", Const, 10, ""},
+		{"R_X86_64_GOTTPOFF", Const, 0, ""},
+		{"R_X86_64_IRELATIVE", Const, 10, ""},
+		{"R_X86_64_JMP_SLOT", Const, 0, ""},
+		{"R_X86_64_NONE", Const, 0, ""},
+		{"R_X86_64_PC16", Const, 0, ""},
+		{"R_X86_64_PC32", Const, 0, ""},
+		{"R_X86_64_PC32_BND", Const, 10, ""},
+		{"R_X86_64_PC64", Const, 10, ""},
+		{"R_X86_64_PC8", Const, 0, ""},
+		{"R_X86_64_PLT32", Const, 0, ""},
+		{"R_X86_64_PLT32_BND", Const, 10, ""},
+		{"R_X86_64_PLTOFF64", Const, 10, ""},
+		{"R_X86_64_RELATIVE", Const, 0, ""},
+		{"R_X86_64_RELATIVE64", Const, 10, ""},
+		{"R_X86_64_REX_GOTPCRELX", Const, 10, ""},
+		{"R_X86_64_SIZE32", Const, 10, ""},
+		{"R_X86_64_SIZE64", Const, 10, ""},
+		{"R_X86_64_TLSDESC", Const, 10, ""},
+		{"R_X86_64_TLSDESC_CALL", Const, 10, ""},
+		{"R_X86_64_TLSGD", Const, 0, ""},
+		{"R_X86_64_TLSLD", Const, 0, ""},
+		{"R_X86_64_TPOFF32", Const, 0, ""},
+		{"R_X86_64_TPOFF64", Const, 0, ""},
+		{"Rel32", Type, 0, ""},
+		{"Rel32.Info", Field, 0, ""},
+		{"Rel32.Off", Field, 0, ""},
+		{"Rel64", Type, 0, ""},
+		{"Rel64.Info", Field, 0, ""},
+		{"Rel64.Off", Field, 0, ""},
+		{"Rela32", Type, 0, ""},
+		{"Rela32.Addend", Field, 0, ""},
+		{"Rela32.Info", Field, 0, ""},
+		{"Rela32.Off", Field, 0, ""},
+		{"Rela64", Type, 0, ""},
+		{"Rela64.Addend", Field, 0, ""},
+		{"Rela64.Info", Field, 0, ""},
+		{"Rela64.Off", Field, 0, ""},
+		{"SHF_ALLOC", Const, 0, ""},
+		{"SHF_COMPRESSED", Const, 6, ""},
+		{"SHF_EXECINSTR", Const, 0, ""},
+		{"SHF_GROUP", Const, 0, ""},
+		{"SHF_INFO_LINK", Const, 0, ""},
+		{"SHF_LINK_ORDER", Const, 0, ""},
+		{"SHF_MASKOS", Const, 0, ""},
+		{"SHF_MASKPROC", Const, 0, ""},
+		{"SHF_MERGE", Const, 0, ""},
+		{"SHF_OS_NONCONFORMING", Const, 0, ""},
+		{"SHF_STRINGS", Const, 0, ""},
+		{"SHF_TLS", Const, 0, ""},
+		{"SHF_WRITE", Const, 0, ""},
+		{"SHN_ABS", Const, 0, ""},
+		{"SHN_COMMON", Const, 0, ""},
+		{"SHN_HIOS", Const, 0, ""},
+		{"SHN_HIPROC", Const, 0, ""},
+		{"SHN_HIRESERVE", Const, 0, ""},
+		{"SHN_LOOS", Const, 0, ""},
+		{"SHN_LOPROC", Const, 0, ""},
+		{"SHN_LORESERVE", Const, 0, ""},
+		{"SHN_UNDEF", Const, 0, ""},
+		{"SHN_XINDEX", Const, 0, ""},
+		{"SHT_DYNAMIC", Const, 0, ""},
+		{"SHT_DYNSYM", Const, 0, ""},
+		{"SHT_FINI_ARRAY", Const, 0, ""},
+		{"SHT_GNU_ATTRIBUTES", Const, 0, ""},
+		{"SHT_GNU_HASH", Const, 0, ""},
+		{"SHT_GNU_LIBLIST", Const, 0, ""},
+		{"SHT_GNU_VERDEF", Const, 0, ""},
+		{"SHT_GNU_VERNEED", Const, 0, ""},
+		{"SHT_GNU_VERSYM", Const, 0, ""},
+		{"SHT_GROUP", Const, 0, ""},
+		{"SHT_HASH", Const, 0, ""},
+		{"SHT_HIOS", Const, 0, ""},
+		{"SHT_HIPROC", Const, 0, ""},
+		{"SHT_HIUSER", Const, 0, ""},
+		{"SHT_INIT_ARRAY", Const, 0, ""},
+		{"SHT_LOOS", Const, 0, ""},
+		{"SHT_LOPROC", Const, 0, ""},
+		{"SHT_LOUSER", Const, 0, ""},
+		{"SHT_MIPS_ABIFLAGS", Const, 17, ""},
+		{"SHT_NOBITS", Const, 0, ""},
+		{"SHT_NOTE", Const, 0, ""},
+		{"SHT_NULL", Const, 0, ""},
+		{"SHT_PREINIT_ARRAY", Const, 0, ""},
+		{"SHT_PROGBITS", Const, 0, ""},
+		{"SHT_REL", Const, 0, ""},
+		{"SHT_RELA", Const, 0, ""},
+		{"SHT_RISCV_ATTRIBUTES", Const, 25, ""},
+		{"SHT_SHLIB", Const, 0, ""},
+		{"SHT_STRTAB", Const, 0, ""},
+		{"SHT_SYMTAB", Const, 0, ""},
+		{"SHT_SYMTAB_SHNDX", Const, 0, ""},
+		{"STB_GLOBAL", Const, 0, ""},
+		{"STB_HIOS", Const, 0, ""},
+		{"STB_HIPROC", Const, 0, ""},
+		{"STB_LOCAL", Const, 0, ""},
+		{"STB_LOOS", Const, 0, ""},
+		{"STB_LOPROC", Const, 0, ""},
+		{"STB_WEAK", Const, 0, ""},
+		{"STT_COMMON", Const, 0, ""},
+		{"STT_FILE", Const, 0, ""},
+		{"STT_FUNC", Const, 0, ""},
+		{"STT_GNU_IFUNC", Const, 23, ""},
+		{"STT_HIOS", Const, 0, ""},
+		{"STT_HIPROC", Const, 0, ""},
+		{"STT_LOOS", Const, 0, ""},
+		{"STT_LOPROC", Const, 0, ""},
+		{"STT_NOTYPE", Const, 0, ""},
+		{"STT_OBJECT", Const, 0, ""},
+		{"STT_RELC", Const, 23, ""},
+		{"STT_SECTION", Const, 0, ""},
+		{"STT_SRELC", Const, 23, ""},
+		{"STT_TLS", Const, 0, ""},
+		{"STV_DEFAULT", Const, 0, ""},
+		{"STV_HIDDEN", Const, 0, ""},
+		{"STV_INTERNAL", Const, 0, ""},
+		{"STV_PROTECTED", Const, 0, ""},
+		{"ST_BIND", Func, 0, "func(info uint8) SymBind"},
+		{"ST_INFO", Func, 0, "func(bind SymBind, typ SymType) uint8"},
+		{"ST_TYPE", Func, 0, "func(info uint8) SymType"},
+		{"ST_VISIBILITY", Func, 0, "func(other uint8) SymVis"},
+		{"Section", Type, 0, ""},
+		{"Section.ReaderAt", Field, 0, ""},
+		{"Section.SectionHeader", Field, 0, ""},
+		{"Section32", Type, 0, ""},
+		{"Section32.Addr", Field, 0, ""},
+		{"Section32.Addralign", Field, 0, ""},
+		{"Section32.Entsize", Field, 0, ""},
+		{"Section32.Flags", Field, 0, ""},
+		{"Section32.Info", Field, 0, ""},
+		{"Section32.Link", Field, 0, ""},
+		{"Section32.Name", Field, 0, ""},
+		{"Section32.Off", Field, 0, ""},
+		{"Section32.Size", Field, 0, ""},
+		{"Section32.Type", Field, 0, ""},
+		{"Section64", Type, 0, ""},
+		{"Section64.Addr", Field, 0, ""},
+		{"Section64.Addralign", Field, 0, ""},
+		{"Section64.Entsize", Field, 0, ""},
+		{"Section64.Flags", Field, 0, ""},
+		{"Section64.Info", Field, 0, ""},
+		{"Section64.Link", Field, 0, ""},
+		{"Section64.Name", Field, 0, ""},
+		{"Section64.Off", Field, 0, ""},
+		{"Section64.Size", Field, 0, ""},
+		{"Section64.Type", Field, 0, ""},
+		{"SectionFlag", Type, 0, ""},
+		{"SectionHeader", Type, 0, ""},
+		{"SectionHeader.Addr", Field, 0, ""},
+		{"SectionHeader.Addralign", Field, 0, ""},
+		{"SectionHeader.Entsize", Field, 0, ""},
+		{"SectionHeader.FileSize", Field, 6, ""},
+		{"SectionHeader.Flags", Field, 0, ""},
+		{"SectionHeader.Info", Field, 0, ""},
+		{"SectionHeader.Link", Field, 0, ""},
+		{"SectionHeader.Name", Field, 0, ""},
+		{"SectionHeader.Offset", Field, 0, ""},
+		{"SectionHeader.Size", Field, 0, ""},
+		{"SectionHeader.Type", Field, 0, ""},
+		{"SectionIndex", Type, 0, ""},
+		{"SectionType", Type, 0, ""},
+		{"Sym32", Type, 0, ""},
+		{"Sym32.Info", Field, 0, ""},
+		{"Sym32.Name", Field, 0, ""},
+		{"Sym32.Other", Field, 0, ""},
+		{"Sym32.Shndx", Field, 0, ""},
+		{"Sym32.Size", Field, 0, ""},
+		{"Sym32.Value", Field, 0, ""},
+		{"Sym32Size", Const, 0, ""},
+		{"Sym64", Type, 0, ""},
+		{"Sym64.Info", Field, 0, ""},
+		{"Sym64.Name", Field, 0, ""},
+		{"Sym64.Other", Field, 0, ""},
+		{"Sym64.Shndx", Field, 0, ""},
+		{"Sym64.Size", Field, 0, ""},
+		{"Sym64.Value", Field, 0, ""},
+		{"Sym64Size", Const, 0, ""},
+		{"SymBind", Type, 0, ""},
+		{"SymType", Type, 0, ""},
+		{"SymVis", Type, 0, ""},
+		{"Symbol", Type, 0, ""},
+		{"Symbol.HasVersion", Field, 24, ""},
+		{"Symbol.Info", Field, 0, ""},
+		{"Symbol.Library", Field, 13, ""},
+		{"Symbol.Name", Field, 0, ""},
+		{"Symbol.Other", Field, 0, ""},
+		{"Symbol.Section", Field, 0, ""},
+		{"Symbol.Size", Field, 0, ""},
+		{"Symbol.Value", Field, 0, ""},
+		{"Symbol.Version", Field, 13, ""},
+		{"Symbol.VersionIndex", Field, 24, ""},
+		{"Type", Type, 0, ""},
+		{"VER_FLG_BASE", Const, 24, ""},
+		{"VER_FLG_INFO", Const, 24, ""},
+		{"VER_FLG_WEAK", Const, 24, ""},
+		{"Version", Type, 0, ""},
+		{"VersionIndex", Type, 24, ""},
+	},
+	"debug/gosym": {
+		{"(*DecodingError).Error", Method, 0, ""},
+		{"(*LineTable).LineToPC", Method, 0, ""},
+		{"(*LineTable).PCToLine", Method, 0, ""},
+		{"(*Sym).BaseName", Method, 0, ""},
+		{"(*Sym).PackageName", Method, 0, ""},
+		{"(*Sym).ReceiverName", Method, 0, ""},
+		{"(*Sym).Static", Method, 0, ""},
+		{"(*Table).LineToPC", Method, 0, ""},
+		{"(*Table).LookupFunc", Method, 0, ""},
+		{"(*Table).LookupSym", Method, 0, ""},
+		{"(*Table).PCToFunc", Method, 0, ""},
+		{"(*Table).PCToLine", Method, 0, ""},
+		{"(*Table).SymByAddr", Method, 0, ""},
+		{"(*UnknownLineError).Error", Method, 0, ""},
+		{"(Func).BaseName", Method, 0, ""},
+		{"(Func).PackageName", Method, 0, ""},
+		{"(Func).ReceiverName", Method, 0, ""},
+		{"(Func).Static", Method, 0, ""},
+		{"(UnknownFileError).Error", Method, 0, ""},
+		{"DecodingError", Type, 0, ""},
+		{"Func", Type, 0, ""},
+		{"Func.End", Field, 0, ""},
+		{"Func.Entry", Field, 0, ""},
+		{"Func.FrameSize", Field, 0, ""},
+		{"Func.LineTable", Field, 0, ""},
+		{"Func.Locals", Field, 0, ""},
+		{"Func.Obj", Field, 0, ""},
+		{"Func.Params", Field, 0, ""},
+		{"Func.Sym", Field, 0, ""},
+		{"LineTable", Type, 0, ""},
+		{"LineTable.Data", Field, 0, ""},
+		{"LineTable.Line", Field, 0, ""},
+		{"LineTable.PC", Field, 0, ""},
+		{"NewLineTable", Func, 0, "func(data []byte, text uint64) *LineTable"},
+		{"NewTable", Func, 0, "func(symtab []byte, pcln *LineTable) (*Table, error)"},
+		{"Obj", Type, 0, ""},
+		{"Obj.Funcs", Field, 0, ""},
+		{"Obj.Paths", Field, 0, ""},
+		{"Sym", Type, 0, ""},
+		{"Sym.Func", Field, 0, ""},
+		{"Sym.GoType", Field, 0, ""},
+		{"Sym.Name", Field, 0, ""},
+		{"Sym.Type", Field, 0, ""},
+		{"Sym.Value", Field, 0, ""},
+		{"Table", Type, 0, ""},
+		{"Table.Files", Field, 0, ""},
+		{"Table.Funcs", Field, 0, ""},
+		{"Table.Objs", Field, 0, ""},
+		{"Table.Syms", Field, 0, ""},
+		{"UnknownFileError", Type, 0, ""},
+		{"UnknownLineError", Type, 0, ""},
+		{"UnknownLineError.File", Field, 0, ""},
+		{"UnknownLineError.Line", Field, 0, ""},
+	},
+	"debug/macho": {
+		{"(*FatFile).Close", Method, 3, ""},
+		{"(*File).Close", Method, 0, ""},
+		{"(*File).DWARF", Method, 0, ""},
+		{"(*File).ImportedLibraries", Method, 0, ""},
+		{"(*File).ImportedSymbols", Method, 0, ""},
+		{"(*File).Section", Method, 0, ""},
+		{"(*File).Segment", Method, 0, ""},
+		{"(*FormatError).Error", Method, 0, ""},
+		{"(*Section).Data", Method, 0, ""},
+		{"(*Section).Open", Method, 0, ""},
+		{"(*Segment).Data", Method, 0, ""},
+		{"(*Segment).Open", Method, 0, ""},
+		{"(Cpu).GoString", Method, 0, ""},
+		{"(Cpu).String", Method, 0, ""},
+		{"(Dylib).Raw", Method, 0, ""},
+		{"(Dysymtab).Raw", Method, 0, ""},
+		{"(FatArch).Close", Method, 3, ""},
+		{"(FatArch).DWARF", Method, 3, ""},
+		{"(FatArch).ImportedLibraries", Method, 3, ""},
+		{"(FatArch).ImportedSymbols", Method, 3, ""},
+		{"(FatArch).Section", Method, 3, ""},
+		{"(FatArch).Segment", Method, 3, ""},
+		{"(LoadBytes).Raw", Method, 0, ""},
+		{"(LoadCmd).GoString", Method, 0, ""},
+		{"(LoadCmd).String", Method, 0, ""},
+		{"(RelocTypeARM).GoString", Method, 10, ""},
+		{"(RelocTypeARM).String", Method, 10, ""},
+		{"(RelocTypeARM64).GoString", Method, 10, ""},
+		{"(RelocTypeARM64).String", Method, 10, ""},
+		{"(RelocTypeGeneric).GoString", Method, 10, ""},
+		{"(RelocTypeGeneric).String", Method, 10, ""},
+		{"(RelocTypeX86_64).GoString", Method, 10, ""},
+		{"(RelocTypeX86_64).String", Method, 10, ""},
+		{"(Rpath).Raw", Method, 10, ""},
+		{"(Section).ReadAt", Method, 0, ""},
+		{"(Segment).Raw", Method, 0, ""},
+		{"(Segment).ReadAt", Method, 0, ""},
+		{"(Symtab).Raw", Method, 0, ""},
+		{"(Type).GoString", Method, 10, ""},
+		{"(Type).String", Method, 10, ""},
+		{"ARM64_RELOC_ADDEND", Const, 10, ""},
+		{"ARM64_RELOC_BRANCH26", Const, 10, ""},
+		{"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10, ""},
+		{"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10, ""},
+		{"ARM64_RELOC_PAGE21", Const, 10, ""},
+		{"ARM64_RELOC_PAGEOFF12", Const, 10, ""},
+		{"ARM64_RELOC_POINTER_TO_GOT", Const, 10, ""},
+		{"ARM64_RELOC_SUBTRACTOR", Const, 10, ""},
+		{"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10, ""},
+		{"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10, ""},
+		{"ARM64_RELOC_UNSIGNED", Const, 10, ""},
+		{"ARM_RELOC_BR24", Const, 10, ""},
+		{"ARM_RELOC_HALF", Const, 10, ""},
+		{"ARM_RELOC_HALF_SECTDIFF", Const, 10, ""},
+		{"ARM_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
+		{"ARM_RELOC_PAIR", Const, 10, ""},
+		{"ARM_RELOC_PB_LA_PTR", Const, 10, ""},
+		{"ARM_RELOC_SECTDIFF", Const, 10, ""},
+		{"ARM_RELOC_VANILLA", Const, 10, ""},
+		{"ARM_THUMB_32BIT_BRANCH", Const, 10, ""},
+		{"ARM_THUMB_RELOC_BR22", Const, 10, ""},
+		{"Cpu", Type, 0, ""},
+		{"Cpu386", Const, 0, ""},
+		{"CpuAmd64", Const, 0, ""},
+		{"CpuArm", Const, 3, ""},
+		{"CpuArm64", Const, 11, ""},
+		{"CpuPpc", Const, 3, ""},
+		{"CpuPpc64", Const, 3, ""},
+		{"Dylib", Type, 0, ""},
+		{"Dylib.CompatVersion", Field, 0, ""},
+		{"Dylib.CurrentVersion", Field, 0, ""},
+		{"Dylib.LoadBytes", Field, 0, ""},
+		{"Dylib.Name", Field, 0, ""},
+		{"Dylib.Time", Field, 0, ""},
+		{"DylibCmd", Type, 0, ""},
+		{"DylibCmd.Cmd", Field, 0, ""},
+		{"DylibCmd.CompatVersion", Field, 0, ""},
+		{"DylibCmd.CurrentVersion", Field, 0, ""},
+		{"DylibCmd.Len", Field, 0, ""},
+		{"DylibCmd.Name", Field, 0, ""},
+		{"DylibCmd.Time", Field, 0, ""},
+		{"Dysymtab", Type, 0, ""},
+		{"Dysymtab.DysymtabCmd", Field, 0, ""},
+		{"Dysymtab.IndirectSyms", Field, 0, ""},
+		{"Dysymtab.LoadBytes", Field, 0, ""},
+		{"DysymtabCmd", Type, 0, ""},
+		{"DysymtabCmd.Cmd", Field, 0, ""},
+		{"DysymtabCmd.Extrefsymoff", Field, 0, ""},
+		{"DysymtabCmd.Extreloff", Field, 0, ""},
+		{"DysymtabCmd.Iextdefsym", Field, 0, ""},
+		{"DysymtabCmd.Ilocalsym", Field, 0, ""},
+		{"DysymtabCmd.Indirectsymoff", Field, 0, ""},
+		{"DysymtabCmd.Iundefsym", Field, 0, ""},
+		{"DysymtabCmd.Len", Field, 0, ""},
+		{"DysymtabCmd.Locreloff", Field, 0, ""},
+		{"DysymtabCmd.Modtaboff", Field, 0, ""},
+		{"DysymtabCmd.Nextdefsym", Field, 0, ""},
+		{"DysymtabCmd.Nextrefsyms", Field, 0, ""},
+		{"DysymtabCmd.Nextrel", Field, 0, ""},
+		{"DysymtabCmd.Nindirectsyms", Field, 0, ""},
+		{"DysymtabCmd.Nlocalsym", Field, 0, ""},
+		{"DysymtabCmd.Nlocrel", Field, 0, ""},
+		{"DysymtabCmd.Nmodtab", Field, 0, ""},
+		{"DysymtabCmd.Ntoc", Field, 0, ""},
+		{"DysymtabCmd.Nundefsym", Field, 0, ""},
+		{"DysymtabCmd.Tocoffset", Field, 0, ""},
+		{"ErrNotFat", Var, 3, ""},
+		{"FatArch", Type, 3, ""},
+		{"FatArch.FatArchHeader", Field, 3, ""},
+		{"FatArch.File", Field, 3, ""},
+		{"FatArchHeader", Type, 3, ""},
+		{"FatArchHeader.Align", Field, 3, ""},
+		{"FatArchHeader.Cpu", Field, 3, ""},
+		{"FatArchHeader.Offset", Field, 3, ""},
+		{"FatArchHeader.Size", Field, 3, ""},
+		{"FatArchHeader.SubCpu", Field, 3, ""},
+		{"FatFile", Type, 3, ""},
+		{"FatFile.Arches", Field, 3, ""},
+		{"FatFile.Magic", Field, 3, ""},
+		{"File", Type, 0, ""},
+		{"File.ByteOrder", Field, 0, ""},
+		{"File.Dysymtab", Field, 0, ""},
+		{"File.FileHeader", Field, 0, ""},
+		{"File.Loads", Field, 0, ""},
+		{"File.Sections", Field, 0, ""},
+		{"File.Symtab", Field, 0, ""},
+		{"FileHeader", Type, 0, ""},
+		{"FileHeader.Cmdsz", Field, 0, ""},
+		{"FileHeader.Cpu", Field, 0, ""},
+		{"FileHeader.Flags", Field, 0, ""},
+		{"FileHeader.Magic", Field, 0, ""},
+		{"FileHeader.Ncmd", Field, 0, ""},
+		{"FileHeader.SubCpu", Field, 0, ""},
+		{"FileHeader.Type", Field, 0, ""},
+		{"FlagAllModsBound", Const, 10, ""},
+		{"FlagAllowStackExecution", Const, 10, ""},
+		{"FlagAppExtensionSafe", Const, 10, ""},
+		{"FlagBindAtLoad", Const, 10, ""},
+		{"FlagBindsToWeak", Const, 10, ""},
+		{"FlagCanonical", Const, 10, ""},
+		{"FlagDeadStrippableDylib", Const, 10, ""},
+		{"FlagDyldLink", Const, 10, ""},
+		{"FlagForceFlat", Const, 10, ""},
+		{"FlagHasTLVDescriptors", Const, 10, ""},
+		{"FlagIncrLink", Const, 10, ""},
+		{"FlagLazyInit", Const, 10, ""},
+		{"FlagNoFixPrebinding", Const, 10, ""},
+		{"FlagNoHeapExecution", Const, 10, ""},
+		{"FlagNoMultiDefs", Const, 10, ""},
+		{"FlagNoReexportedDylibs", Const, 10, ""},
+		{"FlagNoUndefs", Const, 10, ""},
+		{"FlagPIE", Const, 10, ""},
+		{"FlagPrebindable", Const, 10, ""},
+		{"FlagPrebound", Const, 10, ""},
+		{"FlagRootSafe", Const, 10, ""},
+		{"FlagSetuidSafe", Const, 10, ""},
+		{"FlagSplitSegs", Const, 10, ""},
+		{"FlagSubsectionsViaSymbols", Const, 10, ""},
+		{"FlagTwoLevel", Const, 10, ""},
+		{"FlagWeakDefines", Const, 10, ""},
+		{"FormatError", Type, 0, ""},
+		{"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
+		{"GENERIC_RELOC_PAIR", Const, 10, ""},
+		{"GENERIC_RELOC_PB_LA_PTR", Const, 10, ""},
+		{"GENERIC_RELOC_SECTDIFF", Const, 10, ""},
+		{"GENERIC_RELOC_TLV", Const, 10, ""},
+		{"GENERIC_RELOC_VANILLA", Const, 10, ""},
+		{"Load", Type, 0, ""},
+		{"LoadBytes", Type, 0, ""},
+		{"LoadCmd", Type, 0, ""},
+		{"LoadCmdDylib", Const, 0, ""},
+		{"LoadCmdDylinker", Const, 0, ""},
+		{"LoadCmdDysymtab", Const, 0, ""},
+		{"LoadCmdRpath", Const, 10, ""},
+		{"LoadCmdSegment", Const, 0, ""},
+		{"LoadCmdSegment64", Const, 0, ""},
+		{"LoadCmdSymtab", Const, 0, ""},
+		{"LoadCmdThread", Const, 0, ""},
+		{"LoadCmdUnixThread", Const, 0, ""},
+		{"Magic32", Const, 0, ""},
+		{"Magic64", Const, 0, ""},
+		{"MagicFat", Const, 3, ""},
+		{"NewFatFile", Func, 3, "func(r io.ReaderAt) (*FatFile, error)"},
+		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+		{"Nlist32", Type, 0, ""},
+		{"Nlist32.Desc", Field, 0, ""},
+		{"Nlist32.Name", Field, 0, ""},
+		{"Nlist32.Sect", Field, 0, ""},
+		{"Nlist32.Type", Field, 0, ""},
+		{"Nlist32.Value", Field, 0, ""},
+		{"Nlist64", Type, 0, ""},
+		{"Nlist64.Desc", Field, 0, ""},
+		{"Nlist64.Name", Field, 0, ""},
+		{"Nlist64.Sect", Field, 0, ""},
+		{"Nlist64.Type", Field, 0, ""},
+		{"Nlist64.Value", Field, 0, ""},
+		{"Open", Func, 0, "func(name string) (*File, error)"},
+		{"OpenFat", Func, 3, "func(name string) (*FatFile, error)"},
+		{"Regs386", Type, 0, ""},
+		{"Regs386.AX", Field, 0, ""},
+		{"Regs386.BP", Field, 0, ""},
+		{"Regs386.BX", Field, 0, ""},
+		{"Regs386.CS", Field, 0, ""},
+		{"Regs386.CX", Field, 0, ""},
+		{"Regs386.DI", Field, 0, ""},
+		{"Regs386.DS", Field, 0, ""},
+		{"Regs386.DX", Field, 0, ""},
+		{"Regs386.ES", Field, 0, ""},
+		{"Regs386.FLAGS", Field, 0, ""},
+		{"Regs386.FS", Field, 0, ""},
+		{"Regs386.GS", Field, 0, ""},
+		{"Regs386.IP", Field, 0, ""},
+		{"Regs386.SI", Field, 0, ""},
+		{"Regs386.SP", Field, 0, ""},
+		{"Regs386.SS", Field, 0, ""},
+		{"RegsAMD64", Type, 0, ""},
+		{"RegsAMD64.AX", Field, 0, ""},
+		{"RegsAMD64.BP", Field, 0, ""},
+		{"RegsAMD64.BX", Field, 0, ""},
+		{"RegsAMD64.CS", Field, 0, ""},
+		{"RegsAMD64.CX", Field, 0, ""},
+		{"RegsAMD64.DI", Field, 0, ""},
+		{"RegsAMD64.DX", Field, 0, ""},
+		{"RegsAMD64.FLAGS", Field, 0, ""},
+		{"RegsAMD64.FS", Field, 0, ""},
+		{"RegsAMD64.GS", Field, 0, ""},
+		{"RegsAMD64.IP", Field, 0, ""},
+		{"RegsAMD64.R10", Field, 0, ""},
+		{"RegsAMD64.R11", Field, 0, ""},
+		{"RegsAMD64.R12", Field, 0, ""},
+		{"RegsAMD64.R13", Field, 0, ""},
+		{"RegsAMD64.R14", Field, 0, ""},
+		{"RegsAMD64.R15", Field, 0, ""},
+		{"RegsAMD64.R8", Field, 0, ""},
+		{"RegsAMD64.R9", Field, 0, ""},
+		{"RegsAMD64.SI", Field, 0, ""},
+		{"RegsAMD64.SP", Field, 0, ""},
+		{"Reloc", Type, 10, ""},
+		{"Reloc.Addr", Field, 10, ""},
+		{"Reloc.Extern", Field, 10, ""},
+		{"Reloc.Len", Field, 10, ""},
+		{"Reloc.Pcrel", Field, 10, ""},
+		{"Reloc.Scattered", Field, 10, ""},
+		{"Reloc.Type", Field, 10, ""},
+		{"Reloc.Value", Field, 10, ""},
+		{"RelocTypeARM", Type, 10, ""},
+		{"RelocTypeARM64", Type, 10, ""},
+		{"RelocTypeGeneric", Type, 10, ""},
+		{"RelocTypeX86_64", Type, 10, ""},
+		{"Rpath", Type, 10, ""},
+		{"Rpath.LoadBytes", Field, 10, ""},
+		{"Rpath.Path", Field, 10, ""},
+		{"RpathCmd", Type, 10, ""},
+		{"RpathCmd.Cmd", Field, 10, ""},
+		{"RpathCmd.Len", Field, 10, ""},
+		{"RpathCmd.Path", Field, 10, ""},
+		{"Section", Type, 0, ""},
+		{"Section.ReaderAt", Field, 0, ""},
+		{"Section.Relocs", Field, 10, ""},
+		{"Section.SectionHeader", Field, 0, ""},
+		{"Section32", Type, 0, ""},
+		{"Section32.Addr", Field, 0, ""},
+		{"Section32.Align", Field, 0, ""},
+		{"Section32.Flags", Field, 0, ""},
+		{"Section32.Name", Field, 0, ""},
+		{"Section32.Nreloc", Field, 0, ""},
+		{"Section32.Offset", Field, 0, ""},
+		{"Section32.Reloff", Field, 0, ""},
+		{"Section32.Reserve1", Field, 0, ""},
+		{"Section32.Reserve2", Field, 0, ""},
+		{"Section32.Seg", Field, 0, ""},
+		{"Section32.Size", Field, 0, ""},
+		{"Section64", Type, 0, ""},
+		{"Section64.Addr", Field, 0, ""},
+		{"Section64.Align", Field, 0, ""},
+		{"Section64.Flags", Field, 0, ""},
+		{"Section64.Name", Field, 0, ""},
+		{"Section64.Nreloc", Field, 0, ""},
+		{"Section64.Offset", Field, 0, ""},
+		{"Section64.Reloff", Field, 0, ""},
+		{"Section64.Reserve1", Field, 0, ""},
+		{"Section64.Reserve2", Field, 0, ""},
+		{"Section64.Reserve3", Field, 0, ""},
+		{"Section64.Seg", Field, 0, ""},
+		{"Section64.Size", Field, 0, ""},
+		{"SectionHeader", Type, 0, ""},
+		{"SectionHeader.Addr", Field, 0, ""},
+		{"SectionHeader.Align", Field, 0, ""},
+		{"SectionHeader.Flags", Field, 0, ""},
+		{"SectionHeader.Name", Field, 0, ""},
+		{"SectionHeader.Nreloc", Field, 0, ""},
+		{"SectionHeader.Offset", Field, 0, ""},
+		{"SectionHeader.Reloff", Field, 0, ""},
+		{"SectionHeader.Seg", Field, 0, ""},
+		{"SectionHeader.Size", Field, 0, ""},
+		{"Segment", Type, 0, ""},
+		{"Segment.LoadBytes", Field, 0, ""},
+		{"Segment.ReaderAt", Field, 0, ""},
+		{"Segment.SegmentHeader", Field, 0, ""},
+		{"Segment32", Type, 0, ""},
+		{"Segment32.Addr", Field, 0, ""},
+		{"Segment32.Cmd", Field, 0, ""},
+		{"Segment32.Filesz", Field, 0, ""},
+		{"Segment32.Flag", Field, 0, ""},
+		{"Segment32.Len", Field, 0, ""},
+		{"Segment32.Maxprot", Field, 0, ""},
+		{"Segment32.Memsz", Field, 0, ""},
+		{"Segment32.Name", Field, 0, ""},
+		{"Segment32.Nsect", Field, 0, ""},
+		{"Segment32.Offset", Field, 0, ""},
+		{"Segment32.Prot", Field, 0, ""},
+		{"Segment64", Type, 0, ""},
+		{"Segment64.Addr", Field, 0, ""},
+		{"Segment64.Cmd", Field, 0, ""},
+		{"Segment64.Filesz", Field, 0, ""},
+		{"Segment64.Flag", Field, 0, ""},
+		{"Segment64.Len", Field, 0, ""},
+		{"Segment64.Maxprot", Field, 0, ""},
+		{"Segment64.Memsz", Field, 0, ""},
+		{"Segment64.Name", Field, 0, ""},
+		{"Segment64.Nsect", Field, 0, ""},
+		{"Segment64.Offset", Field, 0, ""},
+		{"Segment64.Prot", Field, 0, ""},
+		{"SegmentHeader", Type, 0, ""},
+		{"SegmentHeader.Addr", Field, 0, ""},
+		{"SegmentHeader.Cmd", Field, 0, ""},
+		{"SegmentHeader.Filesz", Field, 0, ""},
+		{"SegmentHeader.Flag", Field, 0, ""},
+		{"SegmentHeader.Len", Field, 0, ""},
+		{"SegmentHeader.Maxprot", Field, 0, ""},
+		{"SegmentHeader.Memsz", Field, 0, ""},
+		{"SegmentHeader.Name", Field, 0, ""},
+		{"SegmentHeader.Nsect", Field, 0, ""},
+		{"SegmentHeader.Offset", Field, 0, ""},
+		{"SegmentHeader.Prot", Field, 0, ""},
+		{"Symbol", Type, 0, ""},
+		{"Symbol.Desc", Field, 0, ""},
+		{"Symbol.Name", Field, 0, ""},
+		{"Symbol.Sect", Field, 0, ""},
+		{"Symbol.Type", Field, 0, ""},
+		{"Symbol.Value", Field, 0, ""},
+		{"Symtab", Type, 0, ""},
+		{"Symtab.LoadBytes", Field, 0, ""},
+		{"Symtab.Syms", Field, 0, ""},
+		{"Symtab.SymtabCmd", Field, 0, ""},
+		{"SymtabCmd", Type, 0, ""},
+		{"SymtabCmd.Cmd", Field, 0, ""},
+		{"SymtabCmd.Len", Field, 0, ""},
+		{"SymtabCmd.Nsyms", Field, 0, ""},
+		{"SymtabCmd.Stroff", Field, 0, ""},
+		{"SymtabCmd.Strsize", Field, 0, ""},
+		{"SymtabCmd.Symoff", Field, 0, ""},
+		{"Thread", Type, 0, ""},
+		{"Thread.Cmd", Field, 0, ""},
+		{"Thread.Data", Field, 0, ""},
+		{"Thread.Len", Field, 0, ""},
+		{"Thread.Type", Field, 0, ""},
+		{"Type", Type, 0, ""},
+		{"TypeBundle", Const, 3, ""},
+		{"TypeDylib", Const, 3, ""},
+		{"TypeExec", Const, 0, ""},
+		{"TypeObj", Const, 0, ""},
+		{"X86_64_RELOC_BRANCH", Const, 10, ""},
+		{"X86_64_RELOC_GOT", Const, 10, ""},
+		{"X86_64_RELOC_GOT_LOAD", Const, 10, ""},
+		{"X86_64_RELOC_SIGNED", Const, 10, ""},
+		{"X86_64_RELOC_SIGNED_1", Const, 10, ""},
+		{"X86_64_RELOC_SIGNED_2", Const, 10, ""},
+		{"X86_64_RELOC_SIGNED_4", Const, 10, ""},
+		{"X86_64_RELOC_SUBTRACTOR", Const, 10, ""},
+		{"X86_64_RELOC_TLV", Const, 10, ""},
+		{"X86_64_RELOC_UNSIGNED", Const, 10, ""},
+	},
+	"debug/pe": {
+		{"(*COFFSymbol).FullName", Method, 8, ""},
+		{"(*File).COFFSymbolReadSectionDefAux", Method, 19, ""},
+		{"(*File).Close", Method, 0, ""},
+		{"(*File).DWARF", Method, 0, ""},
+		{"(*File).ImportedLibraries", Method, 0, ""},
+		{"(*File).ImportedSymbols", Method, 0, ""},
+		{"(*File).Section", Method, 0, ""},
+		{"(*FormatError).Error", Method, 0, ""},
+		{"(*Section).Data", Method, 0, ""},
+		{"(*Section).Open", Method, 0, ""},
+		{"(Section).ReadAt", Method, 0, ""},
+		{"(StringTable).String", Method, 8, ""},
+		{"COFFSymbol", Type, 1, ""},
+		{"COFFSymbol.Name", Field, 1, ""},
+		{"COFFSymbol.NumberOfAuxSymbols", Field, 1, ""},
+		{"COFFSymbol.SectionNumber", Field, 1, ""},
+		{"COFFSymbol.StorageClass", Field, 1, ""},
+		{"COFFSymbol.Type", Field, 1, ""},
+		{"COFFSymbol.Value", Field, 1, ""},
+		{"COFFSymbolAuxFormat5", Type, 19, ""},
+		{"COFFSymbolAuxFormat5.Checksum", Field, 19, ""},
+		{"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19, ""},
+		{"COFFSymbolAuxFormat5.NumRelocs", Field, 19, ""},
+		{"COFFSymbolAuxFormat5.SecNum", Field, 19, ""},
+		{"COFFSymbolAuxFormat5.Selection", Field, 19, ""},
+		{"COFFSymbolAuxFormat5.Size", Field, 19, ""},
+		{"COFFSymbolSize", Const, 1, ""},
+		{"DataDirectory", Type, 3, ""},
+		{"DataDirectory.Size", Field, 3, ""},
+		{"DataDirectory.VirtualAddress", Field, 3, ""},
+		{"File", Type, 0, ""},
+		{"File.COFFSymbols", Field, 8, ""},
+		{"File.FileHeader", Field, 0, ""},
+		{"File.OptionalHeader", Field, 3, ""},
+		{"File.Sections", Field, 0, ""},
+		{"File.StringTable", Field, 8, ""},
+		{"File.Symbols", Field, 1, ""},
+		{"FileHeader", Type, 0, ""},
+		{"FileHeader.Characteristics", Field, 0, ""},
+		{"FileHeader.Machine", Field, 0, ""},
+		{"FileHeader.NumberOfSections", Field, 0, ""},
+		{"FileHeader.NumberOfSymbols", Field, 0, ""},
+		{"FileHeader.PointerToSymbolTable", Field, 0, ""},
+		{"FileHeader.SizeOfOptionalHeader", Field, 0, ""},
+		{"FileHeader.TimeDateStamp", Field, 0, ""},
+		{"FormatError", Type, 0, ""},
+		{"IMAGE_COMDAT_SELECT_ANY", Const, 19, ""},
+		{"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19, ""},
+		{"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19, ""},
+		{"IMAGE_COMDAT_SELECT_LARGEST", Const, 19, ""},
+		{"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19, ""},
+		{"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19, ""},
+		{"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11, ""},
+		{"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11, ""},
+		{"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15, ""},
+		{"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15, ""},
+		{"IMAGE_FILE_32BIT_MACHINE", Const, 15, ""},
+		{"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15, ""},
+		{"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15, ""},
+		{"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15, ""},
+		{"IMAGE_FILE_DEBUG_STRIPPED", Const, 15, ""},
+		{"IMAGE_FILE_DLL", Const, 15, ""},
+		{"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15, ""},
+		{"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15, ""},
+		{"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15, ""},
+		{"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15, ""},
+		{"IMAGE_FILE_MACHINE_AM33", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_AMD64", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_ARM", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_ARM64", Const, 11, ""},
+		{"IMAGE_FILE_MACHINE_ARMNT", Const, 12, ""},
+		{"IMAGE_FILE_MACHINE_EBC", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_I386", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_IA64", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19, ""},
+		{"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19, ""},
+		{"IMAGE_FILE_MACHINE_M32R", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_MIPS16", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_POWERPC", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_R4000", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_RISCV128", Const, 20, ""},
+		{"IMAGE_FILE_MACHINE_RISCV32", Const, 20, ""},
+		{"IMAGE_FILE_MACHINE_RISCV64", Const, 20, ""},
+		{"IMAGE_FILE_MACHINE_SH3", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_SH3DSP", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_SH4", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_SH5", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_THUMB", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0, ""},
+		{"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0, ""},
+		{"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15, ""},
+		{"IMAGE_FILE_RELOCS_STRIPPED", Const, 15, ""},
+		{"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15, ""},
+		{"IMAGE_FILE_SYSTEM", Const, 15, ""},
+		{"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15, ""},
+		{"IMAGE_SCN_CNT_CODE", Const, 19, ""},
+		{"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19, ""},
+		{"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19, ""},
+		{"IMAGE_SCN_LNK_COMDAT", Const, 19, ""},
+		{"IMAGE_SCN_MEM_DISCARDABLE", Const, 19, ""},
+		{"IMAGE_SCN_MEM_EXECUTE", Const, 19, ""},
+		{"IMAGE_SCN_MEM_READ", Const, 19, ""},
+		{"IMAGE_SCN_MEM_WRITE", Const, 19, ""},
+		{"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_NATIVE", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15, ""},
+		{"IMAGE_SUBSYSTEM_XBOX", Const, 15, ""},
+		{"ImportDirectory", Type, 0, ""},
+		{"ImportDirectory.FirstThunk", Field, 0, ""},
+		{"ImportDirectory.ForwarderChain", Field, 0, ""},
+		{"ImportDirectory.Name", Field, 0, ""},
+		{"ImportDirectory.OriginalFirstThunk", Field, 0, ""},
+		{"ImportDirectory.TimeDateStamp", Field, 0, ""},
+		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+		{"Open", Func, 0, "func(name string) (*File, error)"},
+		{"OptionalHeader32", Type, 3, ""},
+		{"OptionalHeader32.AddressOfEntryPoint", Field, 3, ""},
+		{"OptionalHeader32.BaseOfCode", Field, 3, ""},
+		{"OptionalHeader32.BaseOfData", Field, 3, ""},
+		{"OptionalHeader32.CheckSum", Field, 3, ""},
+		{"OptionalHeader32.DataDirectory", Field, 3, ""},
+		{"OptionalHeader32.DllCharacteristics", Field, 3, ""},
+		{"OptionalHeader32.FileAlignment", Field, 3, ""},
+		{"OptionalHeader32.ImageBase", Field, 3, ""},
+		{"OptionalHeader32.LoaderFlags", Field, 3, ""},
+		{"OptionalHeader32.Magic", Field, 3, ""},
+		{"OptionalHeader32.MajorImageVersion", Field, 3, ""},
+		{"OptionalHeader32.MajorLinkerVersion", Field, 3, ""},
+		{"OptionalHeader32.MajorOperatingSystemVersion", Field, 3, ""},
+		{"OptionalHeader32.MajorSubsystemVersion", Field, 3, ""},
+		{"OptionalHeader32.MinorImageVersion", Field, 3, ""},
+		{"OptionalHeader32.MinorLinkerVersion", Field, 3, ""},
+		{"OptionalHeader32.MinorOperatingSystemVersion", Field, 3, ""},
+		{"OptionalHeader32.MinorSubsystemVersion", Field, 3, ""},
+		{"OptionalHeader32.NumberOfRvaAndSizes", Field, 3, ""},
+		{"OptionalHeader32.SectionAlignment", Field, 3, ""},
+		{"OptionalHeader32.SizeOfCode", Field, 3, ""},
+		{"OptionalHeader32.SizeOfHeaders", Field, 3, ""},
+		{"OptionalHeader32.SizeOfHeapCommit", Field, 3, ""},
+		{"OptionalHeader32.SizeOfHeapReserve", Field, 3, ""},
+		{"OptionalHeader32.SizeOfImage", Field, 3, ""},
+		{"OptionalHeader32.SizeOfInitializedData", Field, 3, ""},
+		{"OptionalHeader32.SizeOfStackCommit", Field, 3, ""},
+		{"OptionalHeader32.SizeOfStackReserve", Field, 3, ""},
+		{"OptionalHeader32.SizeOfUninitializedData", Field, 3, ""},
+		{"OptionalHeader32.Subsystem", Field, 3, ""},
+		{"OptionalHeader32.Win32VersionValue", Field, 3, ""},
+		{"OptionalHeader64", Type, 3, ""},
+		{"OptionalHeader64.AddressOfEntryPoint", Field, 3, ""},
+		{"OptionalHeader64.BaseOfCode", Field, 3, ""},
+		{"OptionalHeader64.CheckSum", Field, 3, ""},
+		{"OptionalHeader64.DataDirectory", Field, 3, ""},
+		{"OptionalHeader64.DllCharacteristics", Field, 3, ""},
+		{"OptionalHeader64.FileAlignment", Field, 3, ""},
+		{"OptionalHeader64.ImageBase", Field, 3, ""},
+		{"OptionalHeader64.LoaderFlags", Field, 3, ""},
+		{"OptionalHeader64.Magic", Field, 3, ""},
+		{"OptionalHeader64.MajorImageVersion", Field, 3, ""},
+		{"OptionalHeader64.MajorLinkerVersion", Field, 3, ""},
+		{"OptionalHeader64.MajorOperatingSystemVersion", Field, 3, ""},
+		{"OptionalHeader64.MajorSubsystemVersion", Field, 3, ""},
+		{"OptionalHeader64.MinorImageVersion", Field, 3, ""},
+		{"OptionalHeader64.MinorLinkerVersion", Field, 3, ""},
+		{"OptionalHeader64.MinorOperatingSystemVersion", Field, 3, ""},
+		{"OptionalHeader64.MinorSubsystemVersion", Field, 3, ""},
+		{"OptionalHeader64.NumberOfRvaAndSizes", Field, 3, ""},
+		{"OptionalHeader64.SectionAlignment", Field, 3, ""},
+		{"OptionalHeader64.SizeOfCode", Field, 3, ""},
+		{"OptionalHeader64.SizeOfHeaders", Field, 3, ""},
+		{"OptionalHeader64.SizeOfHeapCommit", Field, 3, ""},
+		{"OptionalHeader64.SizeOfHeapReserve", Field, 3, ""},
+		{"OptionalHeader64.SizeOfImage", Field, 3, ""},
+		{"OptionalHeader64.SizeOfInitializedData", Field, 3, ""},
+		{"OptionalHeader64.SizeOfStackCommit", Field, 3, ""},
+		{"OptionalHeader64.SizeOfStackReserve", Field, 3, ""},
+		{"OptionalHeader64.SizeOfUninitializedData", Field, 3, ""},
+		{"OptionalHeader64.Subsystem", Field, 3, ""},
+		{"OptionalHeader64.Win32VersionValue", Field, 3, ""},
+		{"Reloc", Type, 8, ""},
+		{"Reloc.SymbolTableIndex", Field, 8, ""},
+		{"Reloc.Type", Field, 8, ""},
+		{"Reloc.VirtualAddress", Field, 8, ""},
+		{"Section", Type, 0, ""},
+		{"Section.ReaderAt", Field, 0, ""},
+		{"Section.Relocs", Field, 8, ""},
+		{"Section.SectionHeader", Field, 0, ""},
+		{"SectionHeader", Type, 0, ""},
+		{"SectionHeader.Characteristics", Field, 0, ""},
+		{"SectionHeader.Name", Field, 0, ""},
+		{"SectionHeader.NumberOfLineNumbers", Field, 0, ""},
+		{"SectionHeader.NumberOfRelocations", Field, 0, ""},
+		{"SectionHeader.Offset", Field, 0, ""},
+		{"SectionHeader.PointerToLineNumbers", Field, 0, ""},
+		{"SectionHeader.PointerToRelocations", Field, 0, ""},
+		{"SectionHeader.Size", Field, 0, ""},
+		{"SectionHeader.VirtualAddress", Field, 0, ""},
+		{"SectionHeader.VirtualSize", Field, 0, ""},
+		{"SectionHeader32", Type, 0, ""},
+		{"SectionHeader32.Characteristics", Field, 0, ""},
+		{"SectionHeader32.Name", Field, 0, ""},
+		{"SectionHeader32.NumberOfLineNumbers", Field, 0, ""},
+		{"SectionHeader32.NumberOfRelocations", Field, 0, ""},
+		{"SectionHeader32.PointerToLineNumbers", Field, 0, ""},
+		{"SectionHeader32.PointerToRawData", Field, 0, ""},
+		{"SectionHeader32.PointerToRelocations", Field, 0, ""},
+		{"SectionHeader32.SizeOfRawData", Field, 0, ""},
+		{"SectionHeader32.VirtualAddress", Field, 0, ""},
+		{"SectionHeader32.VirtualSize", Field, 0, ""},
+		{"StringTable", Type, 8, ""},
+		{"Symbol", Type, 1, ""},
+		{"Symbol.Name", Field, 1, ""},
+		{"Symbol.SectionNumber", Field, 1, ""},
+		{"Symbol.StorageClass", Field, 1, ""},
+		{"Symbol.Type", Field, 1, ""},
+		{"Symbol.Value", Field, 1, ""},
+	},
+	"debug/plan9obj": {
+		{"(*File).Close", Method, 3, ""},
+		{"(*File).Section", Method, 3, ""},
+		{"(*File).Symbols", Method, 3, ""},
+		{"(*Section).Data", Method, 3, ""},
+		{"(*Section).Open", Method, 3, ""},
+		{"(Section).ReadAt", Method, 3, ""},
+		{"ErrNoSymbols", Var, 18, ""},
+		{"File", Type, 3, ""},
+		{"File.FileHeader", Field, 3, ""},
+		{"File.Sections", Field, 3, ""},
+		{"FileHeader", Type, 3, ""},
+		{"FileHeader.Bss", Field, 3, ""},
+		{"FileHeader.Entry", Field, 3, ""},
+		{"FileHeader.HdrSize", Field, 4, ""},
+		{"FileHeader.LoadAddress", Field, 4, ""},
+		{"FileHeader.Magic", Field, 3, ""},
+		{"FileHeader.PtrSize", Field, 3, ""},
+		{"Magic386", Const, 3, ""},
+		{"Magic64", Const, 3, ""},
+		{"MagicAMD64", Const, 3, ""},
+		{"MagicARM", Const, 3, ""},
+		{"NewFile", Func, 3, "func(r io.ReaderAt) (*File, error)"},
+		{"Open", Func, 3, "func(name string) (*File, error)"},
+		{"Section", Type, 3, ""},
+		{"Section.ReaderAt", Field, 3, ""},
+		{"Section.SectionHeader", Field, 3, ""},
+		{"SectionHeader", Type, 3, ""},
+		{"SectionHeader.Name", Field, 3, ""},
+		{"SectionHeader.Offset", Field, 3, ""},
+		{"SectionHeader.Size", Field, 3, ""},
+		{"Sym", Type, 3, ""},
+		{"Sym.Name", Field, 3, ""},
+		{"Sym.Type", Field, 3, ""},
+		{"Sym.Value", Field, 3, ""},
+	},
+	"embed": {
+		{"(FS).Open", Method, 16, ""},
+		{"(FS).ReadDir", Method, 16, ""},
+		{"(FS).ReadFile", Method, 16, ""},
+		{"FS", Type, 16, ""},
+	},
+	"encoding": {
+		{"BinaryAppender", Type, 24, ""},
+		{"BinaryMarshaler", Type, 2, ""},
+		{"BinaryUnmarshaler", Type, 2, ""},
+		{"TextAppender", Type, 24, ""},
+		{"TextMarshaler", Type, 2, ""},
+		{"TextUnmarshaler", Type, 2, ""},
+	},
+	"encoding/ascii85": {
+		{"(CorruptInputError).Error", Method, 0, ""},
+		{"CorruptInputError", Type, 0, ""},
+		{"Decode", Func, 0, "func(dst []byte, src []byte, flush bool) (ndst int, nsrc int, err error)"},
+		{"Encode", Func, 0, "func(dst []byte, src []byte) int"},
+		{"MaxEncodedLen", Func, 0, "func(n int) int"},
+		{"NewDecoder", Func, 0, "func(r io.Reader) io.Reader"},
+		{"NewEncoder", Func, 0, "func(w io.Writer) io.WriteCloser"},
+	},
+	"encoding/asn1": {
+		{"(BitString).At", Method, 0, ""},
+		{"(BitString).RightAlign", Method, 0, ""},
+		{"(ObjectIdentifier).Equal", Method, 0, ""},
+		{"(ObjectIdentifier).String", Method, 3, ""},
+		{"(StructuralError).Error", Method, 0, ""},
+		{"(SyntaxError).Error", Method, 0, ""},
+		{"BitString", Type, 0, ""},
+		{"BitString.BitLength", Field, 0, ""},
+		{"BitString.Bytes", Field, 0, ""},
+		{"ClassApplication", Const, 6, ""},
+		{"ClassContextSpecific", Const, 6, ""},
+		{"ClassPrivate", Const, 6, ""},
+		{"ClassUniversal", Const, 6, ""},
+		{"Enumerated", Type, 0, ""},
+		{"Flag", Type, 0, ""},
+		{"Marshal", Func, 0, "func(val any) ([]byte, error)"},
+		{"MarshalWithParams", Func, 10, "func(val any, params string) ([]byte, error)"},
+		{"NullBytes", Var, 9, ""},
+		{"NullRawValue", Var, 9, ""},
+		{"ObjectIdentifier", Type, 0, ""},
+		{"RawContent", Type, 0, ""},
+		{"RawValue", Type, 0, ""},
+		{"RawValue.Bytes", Field, 0, ""},
+		{"RawValue.Class", Field, 0, ""},
+		{"RawValue.FullBytes", Field, 0, ""},
+		{"RawValue.IsCompound", Field, 0, ""},
+		{"RawValue.Tag", Field, 0, ""},
+		{"StructuralError", Type, 0, ""},
+		{"StructuralError.Msg", Field, 0, ""},
+		{"SyntaxError", Type, 0, ""},
+		{"SyntaxError.Msg", Field, 0, ""},
+		{"TagBMPString", Const, 14, ""},
+		{"TagBitString", Const, 6, ""},
+		{"TagBoolean", Const, 6, ""},
+		{"TagEnum", Const, 6, ""},
+		{"TagGeneralString", Const, 6, ""},
+		{"TagGeneralizedTime", Const, 6, ""},
+		{"TagIA5String", Const, 6, ""},
+		{"TagInteger", Const, 6, ""},
+		{"TagNull", Const, 9, ""},
+		{"TagNumericString", Const, 10, ""},
+		{"TagOID", Const, 6, ""},
+		{"TagOctetString", Const, 6, ""},
+		{"TagPrintableString", Const, 6, ""},
+		{"TagSequence", Const, 6, ""},
+		{"TagSet", Const, 6, ""},
+		{"TagT61String", Const, 6, ""},
+		{"TagUTCTime", Const, 6, ""},
+		{"TagUTF8String", Const, 6, ""},
+		{"Unmarshal", Func, 0, "func(b []byte, val any) (rest []byte, err error)"},
+		{"UnmarshalWithParams", Func, 0, "func(b []byte, val any, params string) (rest []byte, err error)"},
+	},
+	"encoding/base32": {
+		{"(*Encoding).AppendDecode", Method, 22, ""},
+		{"(*Encoding).AppendEncode", Method, 22, ""},
+		{"(*Encoding).Decode", Method, 0, ""},
+		{"(*Encoding).DecodeString", Method, 0, ""},
+		{"(*Encoding).DecodedLen", Method, 0, ""},
+		{"(*Encoding).Encode", Method, 0, ""},
+		{"(*Encoding).EncodeToString", Method, 0, ""},
+		{"(*Encoding).EncodedLen", Method, 0, ""},
+		{"(CorruptInputError).Error", Method, 0, ""},
+		{"(Encoding).WithPadding", Method, 9, ""},
+		{"CorruptInputError", Type, 0, ""},
+		{"Encoding", Type, 0, ""},
+		{"HexEncoding", Var, 0, ""},
+		{"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
+		{"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
+		{"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
+		{"NoPadding", Const, 9, ""},
+		{"StdEncoding", Var, 0, ""},
+		{"StdPadding", Const, 9, ""},
+	},
+	"encoding/base64": {
+		{"(*Encoding).AppendDecode", Method, 22, ""},
+		{"(*Encoding).AppendEncode", Method, 22, ""},
+		{"(*Encoding).Decode", Method, 0, ""},
+		{"(*Encoding).DecodeString", Method, 0, ""},
+		{"(*Encoding).DecodedLen", Method, 0, ""},
+		{"(*Encoding).Encode", Method, 0, ""},
+		{"(*Encoding).EncodeToString", Method, 0, ""},
+		{"(*Encoding).EncodedLen", Method, 0, ""},
+		{"(CorruptInputError).Error", Method, 0, ""},
+		{"(Encoding).Strict", Method, 8, ""},
+		{"(Encoding).WithPadding", Method, 5, ""},
+		{"CorruptInputError", Type, 0, ""},
+		{"Encoding", Type, 0, ""},
+		{"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
+		{"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
+		{"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
+		{"NoPadding", Const, 5, ""},
+		{"RawStdEncoding", Var, 5, ""},
+		{"RawURLEncoding", Var, 5, ""},
+		{"StdEncoding", Var, 0, ""},
+		{"StdPadding", Const, 5, ""},
+		{"URLEncoding", Var, 0, ""},
+	},
+	"encoding/binary": {
+		{"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"},
+		{"AppendByteOrder", Type, 19, ""},
+		{"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"},
+		{"AppendVarint", Func, 19, "func(buf []byte, x int64) []byte"},
+		{"BigEndian", Var, 0, ""},
+		{"ByteOrder", Type, 0, ""},
+		{"Decode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
+		{"Encode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
+		{"LittleEndian", Var, 0, ""},
+		{"MaxVarintLen16", Const, 0, ""},
+		{"MaxVarintLen32", Const, 0, ""},
+		{"MaxVarintLen64", Const, 0, ""},
+		{"NativeEndian", Var, 21, ""},
+		{"PutUvarint", Func, 0, "func(buf []byte, x uint64) int"},
+		{"PutVarint", Func, 0, "func(buf []byte, x int64) int"},
+		{"Read", Func, 0, "func(r io.Reader, order ByteOrder, data any) error"},
+		{"ReadUvarint", Func, 0, "func(r io.ByteReader) (uint64, error)"},
+		{"ReadVarint", Func, 0, "func(r io.ByteReader) (int64, error)"},
+		{"Size", Func, 0, "func(v any) int"},
+		{"Uvarint", Func, 0, "func(buf []byte) (uint64, int)"},
+		{"Varint", Func, 0, "func(buf []byte) (int64, int)"},
+		{"Write", Func, 0, "func(w io.Writer, order ByteOrder, data any) error"},
+	},
+	"encoding/csv": {
+		{"(*ParseError).Error", Method, 0, ""},
+		{"(*ParseError).Unwrap", Method, 13, ""},
+		{"(*Reader).FieldPos", Method, 17, ""},
+		{"(*Reader).InputOffset", Method, 19, ""},
+		{"(*Reader).Read", Method, 0, ""},
+		{"(*Reader).ReadAll", Method, 0, ""},
+		{"(*Writer).Error", Method, 1, ""},
+		{"(*Writer).Flush", Method, 0, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"(*Writer).WriteAll", Method, 0, ""},
+		{"ErrBareQuote", Var, 0, ""},
+		{"ErrFieldCount", Var, 0, ""},
+		{"ErrQuote", Var, 0, ""},
+		{"ErrTrailingComma", Var, 0, ""},
+		{"NewReader", Func, 0, "func(r io.Reader) *Reader"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"ParseError", Type, 0, ""},
+		{"ParseError.Column", Field, 0, ""},
+		{"ParseError.Err", Field, 0, ""},
+		{"ParseError.Line", Field, 0, ""},
+		{"ParseError.StartLine", Field, 10, ""},
+		{"Reader", Type, 0, ""},
+		{"Reader.Comma", Field, 0, ""},
+		{"Reader.Comment", Field, 0, ""},
+		{"Reader.FieldsPerRecord", Field, 0, ""},
+		{"Reader.LazyQuotes", Field, 0, ""},
+		{"Reader.ReuseRecord", Field, 9, ""},
+		{"Reader.TrailingComma", Field, 0, ""},
+		{"Reader.TrimLeadingSpace", Field, 0, ""},
+		{"Writer", Type, 0, ""},
+		{"Writer.Comma", Field, 0, ""},
+		{"Writer.UseCRLF", Field, 0, ""},
+	},
+	"encoding/gob": {
+		{"(*Decoder).Decode", Method, 0, ""},
+		{"(*Decoder).DecodeValue", Method, 0, ""},
+		{"(*Encoder).Encode", Method, 0, ""},
+		{"(*Encoder).EncodeValue", Method, 0, ""},
+		{"CommonType", Type, 0, ""},
+		{"CommonType.Id", Field, 0, ""},
+		{"CommonType.Name", Field, 0, ""},
+		{"Decoder", Type, 0, ""},
+		{"Encoder", Type, 0, ""},
+		{"GobDecoder", Type, 0, ""},
+		{"GobEncoder", Type, 0, ""},
+		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+		{"Register", Func, 0, "func(value any)"},
+		{"RegisterName", Func, 0, "func(name string, value any)"},
+	},
+	"encoding/hex": {
+		{"(InvalidByteError).Error", Method, 0, ""},
+		{"AppendDecode", Func, 22, "func(dst []byte, src []byte) ([]byte, error)"},
+		{"AppendEncode", Func, 22, "func(dst []byte, src []byte) []byte"},
+		{"Decode", Func, 0, "func(dst []byte, src []byte) (int, error)"},
+		{"DecodeString", Func, 0, "func(s string) ([]byte, error)"},
+		{"DecodedLen", Func, 0, "func(x int) int"},
+		{"Dump", Func, 0, "func(data []byte) string"},
+		{"Dumper", Func, 0, "func(w io.Writer) io.WriteCloser"},
+		{"Encode", Func, 0, "func(dst []byte, src []byte) int"},
+		{"EncodeToString", Func, 0, "func(src []byte) string"},
+		{"EncodedLen", Func, 0, "func(n int) int"},
+		{"ErrLength", Var, 0, ""},
+		{"InvalidByteError", Type, 0, ""},
+		{"NewDecoder", Func, 10, "func(r io.Reader) io.Reader"},
+		{"NewEncoder", Func, 10, "func(w io.Writer) io.Writer"},
+	},
+	"encoding/json": {
+		{"(*Decoder).Buffered", Method, 1, ""},
+		{"(*Decoder).Decode", Method, 0, ""},
+		{"(*Decoder).DisallowUnknownFields", Method, 10, ""},
+		{"(*Decoder).InputOffset", Method, 14, ""},
+		{"(*Decoder).More", Method, 5, ""},
+		{"(*Decoder).Token", Method, 5, ""},
+		{"(*Decoder).UseNumber", Method, 1, ""},
+		{"(*Encoder).Encode", Method, 0, ""},
+		{"(*Encoder).SetEscapeHTML", Method, 7, ""},
+		{"(*Encoder).SetIndent", Method, 7, ""},
+		{"(*InvalidUTF8Error).Error", Method, 0, ""},
+		{"(*InvalidUnmarshalError).Error", Method, 0, ""},
+		{"(*MarshalerError).Error", Method, 0, ""},
+		{"(*MarshalerError).Unwrap", Method, 13, ""},
+		{"(*RawMessage).MarshalJSON", Method, 0, ""},
+		{"(*RawMessage).UnmarshalJSON", Method, 0, ""},
+		{"(*SyntaxError).Error", Method, 0, ""},
+		{"(*UnmarshalFieldError).Error", Method, 0, ""},
+		{"(*UnmarshalTypeError).Error", Method, 0, ""},
+		{"(*UnsupportedTypeError).Error", Method, 0, ""},
+		{"(*UnsupportedValueError).Error", Method, 0, ""},
+		{"(Delim).String", Method, 5, ""},
+		{"(Number).Float64", Method, 1, ""},
+		{"(Number).Int64", Method, 1, ""},
+		{"(Number).String", Method, 1, ""},
+		{"(RawMessage).MarshalJSON", Method, 8, ""},
+		{"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"},
+		{"Decoder", Type, 0, ""},
+		{"Delim", Type, 5, ""},
+		{"Encoder", Type, 0, ""},
+		{"HTMLEscape", Func, 0, "func(dst *bytes.Buffer, src []byte)"},
+		{"Indent", Func, 0, "func(dst *bytes.Buffer, src []byte, prefix string, indent string) error"},
+		{"InvalidUTF8Error", Type, 0, ""},
+		{"InvalidUTF8Error.S", Field, 0, ""},
+		{"InvalidUnmarshalError", Type, 0, ""},
+		{"InvalidUnmarshalError.Type", Field, 0, ""},
+		{"Marshal", Func, 0, "func(v any) ([]byte, error)"},
+		{"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
+		{"Marshaler", Type, 0, ""},
+		{"MarshalerError", Type, 0, ""},
+		{"MarshalerError.Err", Field, 0, ""},
+		{"MarshalerError.Type", Field, 0, ""},
+		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+		{"Number", Type, 1, ""},
+		{"RawMessage", Type, 0, ""},
+		{"SyntaxError", Type, 0, ""},
+		{"SyntaxError.Offset", Field, 0, ""},
+		{"Token", Type, 5, ""},
+		{"Unmarshal", Func, 0, "func(data []byte, v any) error"},
+		{"UnmarshalFieldError", Type, 0, ""},
+		{"UnmarshalFieldError.Field", Field, 0, ""},
+		{"UnmarshalFieldError.Key", Field, 0, ""},
+		{"UnmarshalFieldError.Type", Field, 0, ""},
+		{"UnmarshalTypeError", Type, 0, ""},
+		{"UnmarshalTypeError.Field", Field, 8, ""},
+		{"UnmarshalTypeError.Offset", Field, 5, ""},
+		{"UnmarshalTypeError.Struct", Field, 8, ""},
+		{"UnmarshalTypeError.Type", Field, 0, ""},
+		{"UnmarshalTypeError.Value", Field, 0, ""},
+		{"Unmarshaler", Type, 0, ""},
+		{"UnsupportedTypeError", Type, 0, ""},
+		{"UnsupportedTypeError.Type", Field, 0, ""},
+		{"UnsupportedValueError", Type, 0, ""},
+		{"UnsupportedValueError.Str", Field, 0, ""},
+		{"UnsupportedValueError.Value", Field, 0, ""},
+		{"Valid", Func, 9, "func(data []byte) bool"},
+	},
+	"encoding/pem": {
+		{"Block", Type, 0, ""},
+		{"Block.Bytes", Field, 0, ""},
+		{"Block.Headers", Field, 0, ""},
+		{"Block.Type", Field, 0, ""},
+		{"Decode", Func, 0, "func(data []byte) (p *Block, rest []byte)"},
+		{"Encode", Func, 0, "func(out io.Writer, b *Block) error"},
+		{"EncodeToMemory", Func, 0, "func(b *Block) []byte"},
+	},
+	"encoding/xml": {
+		{"(*Decoder).Decode", Method, 0, ""},
+		{"(*Decoder).DecodeElement", Method, 0, ""},
+		{"(*Decoder).InputOffset", Method, 4, ""},
+		{"(*Decoder).InputPos", Method, 19, ""},
+		{"(*Decoder).RawToken", Method, 0, ""},
+		{"(*Decoder).Skip", Method, 0, ""},
+		{"(*Decoder).Token", Method, 0, ""},
+		{"(*Encoder).Close", Method, 20, ""},
+		{"(*Encoder).Encode", Method, 0, ""},
+		{"(*Encoder).EncodeElement", Method, 2, ""},
+		{"(*Encoder).EncodeToken", Method, 2, ""},
+		{"(*Encoder).Flush", Method, 2, ""},
+		{"(*Encoder).Indent", Method, 1, ""},
+		{"(*SyntaxError).Error", Method, 0, ""},
+		{"(*TagPathError).Error", Method, 0, ""},
+		{"(*UnsupportedTypeError).Error", Method, 0, ""},
+		{"(CharData).Copy", Method, 0, ""},
+		{"(Comment).Copy", Method, 0, ""},
+		{"(Directive).Copy", Method, 0, ""},
+		{"(ProcInst).Copy", Method, 0, ""},
+		{"(StartElement).Copy", Method, 0, ""},
+		{"(StartElement).End", Method, 2, ""},
+		{"(UnmarshalError).Error", Method, 0, ""},
+		{"Attr", Type, 0, ""},
+		{"Attr.Name", Field, 0, ""},
+		{"Attr.Value", Field, 0, ""},
+		{"CharData", Type, 0, ""},
+		{"Comment", Type, 0, ""},
+		{"CopyToken", Func, 0, "func(t Token) Token"},
+		{"Decoder", Type, 0, ""},
+		{"Decoder.AutoClose", Field, 0, ""},
+		{"Decoder.CharsetReader", Field, 0, ""},
+		{"Decoder.DefaultSpace", Field, 1, ""},
+		{"Decoder.Entity", Field, 0, ""},
+		{"Decoder.Strict", Field, 0, ""},
+		{"Directive", Type, 0, ""},
+		{"Encoder", Type, 0, ""},
+		{"EndElement", Type, 0, ""},
+		{"EndElement.Name", Field, 0, ""},
+		{"Escape", Func, 0, "func(w io.Writer, s []byte)"},
+		{"EscapeText", Func, 1, "func(w io.Writer, s []byte) error"},
+		{"HTMLAutoClose", Var, 0, ""},
+		{"HTMLEntity", Var, 0, ""},
+		{"Header", Const, 0, ""},
+		{"Marshal", Func, 0, "func(v any) ([]byte, error)"},
+		{"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
+		{"Marshaler", Type, 2, ""},
+		{"MarshalerAttr", Type, 2, ""},
+		{"Name", Type, 0, ""},
+		{"Name.Local", Field, 0, ""},
+		{"Name.Space", Field, 0, ""},
+		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+		{"NewTokenDecoder", Func, 10, "func(t TokenReader) *Decoder"},
+		{"ProcInst", Type, 0, ""},
+		{"ProcInst.Inst", Field, 0, ""},
+		{"ProcInst.Target", Field, 0, ""},
+		{"StartElement", Type, 0, ""},
+		{"StartElement.Attr", Field, 0, ""},
+		{"StartElement.Name", Field, 0, ""},
+		{"SyntaxError", Type, 0, ""},
+		{"SyntaxError.Line", Field, 0, ""},
+		{"SyntaxError.Msg", Field, 0, ""},
+		{"TagPathError", Type, 0, ""},
+		{"TagPathError.Field1", Field, 0, ""},
+		{"TagPathError.Field2", Field, 0, ""},
+		{"TagPathError.Struct", Field, 0, ""},
+		{"TagPathError.Tag1", Field, 0, ""},
+		{"TagPathError.Tag2", Field, 0, ""},
+		{"Token", Type, 0, ""},
+		{"TokenReader", Type, 10, ""},
+		{"Unmarshal", Func, 0, "func(data []byte, v any) error"},
+		{"UnmarshalError", Type, 0, ""},
+		{"Unmarshaler", Type, 2, ""},
+		{"UnmarshalerAttr", Type, 2, ""},
+		{"UnsupportedTypeError", Type, 0, ""},
+		{"UnsupportedTypeError.Type", Field, 0, ""},
+	},
+	"errors": {
+		{"As", Func, 13, "func(err error, target any) bool"},
+		{"AsType", Func, 26, "func[E error](err error) (E, bool)"},
+		{"ErrUnsupported", Var, 21, ""},
+		{"Is", Func, 13, "func(err error, target error) bool"},
+		{"Join", Func, 20, "func(errs ...error) error"},
+		{"New", Func, 0, "func(text string) error"},
+		{"Unwrap", Func, 13, "func(err error) error"},
+	},
+	"expvar": {
+		{"(*Float).Add", Method, 0, ""},
+		{"(*Float).Set", Method, 0, ""},
+		{"(*Float).String", Method, 0, ""},
+		{"(*Float).Value", Method, 8, ""},
+		{"(*Int).Add", Method, 0, ""},
+		{"(*Int).Set", Method, 0, ""},
+		{"(*Int).String", Method, 0, ""},
+		{"(*Int).Value", Method, 8, ""},
+		{"(*Map).Add", Method, 0, ""},
+		{"(*Map).AddFloat", Method, 0, ""},
+		{"(*Map).Delete", Method, 12, ""},
+		{"(*Map).Do", Method, 0, ""},
+		{"(*Map).Get", Method, 0, ""},
+		{"(*Map).Init", Method, 0, ""},
+		{"(*Map).Set", Method, 0, ""},
+		{"(*Map).String", Method, 0, ""},
+		{"(*String).Set", Method, 0, ""},
+		{"(*String).String", Method, 0, ""},
+		{"(*String).Value", Method, 8, ""},
+		{"(Func).String", Method, 0, ""},
+		{"(Func).Value", Method, 8, ""},
+		{"Do", Func, 0, "func(f func(KeyValue))"},
+		{"Float", Type, 0, ""},
+		{"Func", Type, 0, ""},
+		{"Get", Func, 0, "func(name string) Var"},
+		{"Handler", Func, 8, "func() http.Handler"},
+		{"Int", Type, 0, ""},
+		{"KeyValue", Type, 0, ""},
+		{"KeyValue.Key", Field, 0, ""},
+		{"KeyValue.Value", Field, 0, ""},
+		{"Map", Type, 0, ""},
+		{"NewFloat", Func, 0, "func(name string) *Float"},
+		{"NewInt", Func, 0, "func(name string) *Int"},
+		{"NewMap", Func, 0, "func(name string) *Map"},
+		{"NewString", Func, 0, "func(name string) *String"},
+		{"Publish", Func, 0, "func(name string, v Var)"},
+		{"String", Type, 0, ""},
+		{"Var", Type, 0, ""},
+	},
+	"flag": {
+		{"(*FlagSet).Arg", Method, 0, ""},
+		{"(*FlagSet).Args", Method, 0, ""},
+		{"(*FlagSet).Bool", Method, 0, ""},
+		{"(*FlagSet).BoolFunc", Method, 21, ""},
+		{"(*FlagSet).BoolVar", Method, 0, ""},
+		{"(*FlagSet).Duration", Method, 0, ""},
+		{"(*FlagSet).DurationVar", Method, 0, ""},
+		{"(*FlagSet).ErrorHandling", Method, 10, ""},
+		{"(*FlagSet).Float64", Method, 0, ""},
+		{"(*FlagSet).Float64Var", Method, 0, ""},
+		{"(*FlagSet).Func", Method, 16, ""},
+		{"(*FlagSet).Init", Method, 0, ""},
+		{"(*FlagSet).Int", Method, 0, ""},
+		{"(*FlagSet).Int64", Method, 0, ""},
+		{"(*FlagSet).Int64Var", Method, 0, ""},
+		{"(*FlagSet).IntVar", Method, 0, ""},
+		{"(*FlagSet).Lookup", Method, 0, ""},
+		{"(*FlagSet).NArg", Method, 0, ""},
+		{"(*FlagSet).NFlag", Method, 0, ""},
+		{"(*FlagSet).Name", Method, 10, ""},
+		{"(*FlagSet).Output", Method, 10, ""},
+		{"(*FlagSet).Parse", Method, 0, ""},
+		{"(*FlagSet).Parsed", Method, 0, ""},
+		{"(*FlagSet).PrintDefaults", Method, 0, ""},
+		{"(*FlagSet).Set", Method, 0, ""},
+		{"(*FlagSet).SetOutput", Method, 0, ""},
+		{"(*FlagSet).String", Method, 0, ""},
+		{"(*FlagSet).StringVar", Method, 0, ""},
+		{"(*FlagSet).TextVar", Method, 19, ""},
+		{"(*FlagSet).Uint", Method, 0, ""},
+		{"(*FlagSet).Uint64", Method, 0, ""},
+		{"(*FlagSet).Uint64Var", Method, 0, ""},
+		{"(*FlagSet).UintVar", Method, 0, ""},
+		{"(*FlagSet).Var", Method, 0, ""},
+		{"(*FlagSet).Visit", Method, 0, ""},
+		{"(*FlagSet).VisitAll", Method, 0, ""},
+		{"Arg", Func, 0, "func(i int) string"},
+		{"Args", Func, 0, "func() []string"},
+		{"Bool", Func, 0, "func(name string, value bool, usage string) *bool"},
+		{"BoolFunc", Func, 21, "func(name string, usage string, fn func(string) error)"},
+		{"BoolVar", Func, 0, "func(p *bool, name string, value bool, usage string)"},
+		{"CommandLine", Var, 2, ""},
+		{"ContinueOnError", Const, 0, ""},
+		{"Duration", Func, 0, "func(name string, value time.Duration, usage string) *time.Duration"},
+		{"DurationVar", Func, 0, "func(p *time.Duration, name string, value time.Duration, usage string)"},
+		{"ErrHelp", Var, 0, ""},
+		{"ErrorHandling", Type, 0, ""},
+		{"ExitOnError", Const, 0, ""},
+		{"Flag", Type, 0, ""},
+		{"Flag.DefValue", Field, 0, ""},
+		{"Flag.Name", Field, 0, ""},
+		{"Flag.Usage", Field, 0, ""},
+		{"Flag.Value", Field, 0, ""},
+		{"FlagSet", Type, 0, ""},
+		{"FlagSet.Usage", Field, 0, ""},
+		{"Float64", Func, 0, "func(name string, value float64, usage string) *float64"},
+		{"Float64Var", Func, 0, "func(p *float64, name string, value float64, usage string)"},
+		{"Func", Func, 16, "func(name string, usage string, fn func(string) error)"},
+		{"Getter", Type, 2, ""},
+		{"Int", Func, 0, "func(name string, value int, usage string) *int"},
+		{"Int64", Func, 0, "func(name string, value int64, usage string) *int64"},
+		{"Int64Var", Func, 0, "func(p *int64, name string, value int64, usage string)"},
+		{"IntVar", Func, 0, "func(p *int, name string, value int, usage string)"},
+		{"Lookup", Func, 0, "func(name string) *Flag"},
+		{"NArg", Func, 0, "func() int"},
+		{"NFlag", Func, 0, "func() int"},
+		{"NewFlagSet", Func, 0, "func(name string, errorHandling ErrorHandling) *FlagSet"},
+		{"PanicOnError", Const, 0, ""},
+		{"Parse", Func, 0, "func()"},
+		{"Parsed", Func, 0, "func() bool"},
+		{"PrintDefaults", Func, 0, "func()"},
+		{"Set", Func, 0, "func(name string, value string) error"},
+		{"String", Func, 0, "func(name string, value string, usage string) *string"},
+		{"StringVar", Func, 0, "func(p *string, name string, value string, usage string)"},
+		{"TextVar", Func, 19, "func(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string)"},
+		{"Uint", Func, 0, "func(name string, value uint, usage string) *uint"},
+		{"Uint64", Func, 0, "func(name string, value uint64, usage string) *uint64"},
+		{"Uint64Var", Func, 0, "func(p *uint64, name string, value uint64, usage string)"},
+		{"UintVar", Func, 0, "func(p *uint, name string, value uint, usage string)"},
+		{"UnquoteUsage", Func, 5, "func(flag *Flag) (name string, usage string)"},
+		{"Usage", Var, 0, ""},
+		{"Value", Type, 0, ""},
+		{"Var", Func, 0, "func(value Value, name string, usage string)"},
+		{"Visit", Func, 0, "func(fn func(*Flag))"},
+		{"VisitAll", Func, 0, "func(fn func(*Flag))"},
+	},
+	"fmt": {
+		{"Append", Func, 19, "func(b []byte, a ...any) []byte"},
+		{"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"},
+		{"Appendln", Func, 19, "func(b []byte, a ...any) []byte"},
+		{"Errorf", Func, 0, "func(format string, a ...any) (err error)"},
+		{"FormatString", Func, 20, "func(state State, verb rune) string"},
+		{"Formatter", Type, 0, ""},
+		{"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
+		{"Fprintf", Func, 0, "func(w io.Writer, format string, a ...any) (n int, err error)"},
+		{"Fprintln", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
+		{"Fscan", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
+		{"Fscanf", Func, 0, "func(r io.Reader, format string, a ...any) (n int, err error)"},
+		{"Fscanln", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
+		{"GoStringer", Type, 0, ""},
+		{"Print", Func, 0, "func(a ...any) (n int, err error)"},
+		{"Printf", Func, 0, "func(format string, a ...any) (n int, err error)"},
+		{"Println", Func, 0, "func(a ...any) (n int, err error)"},
+		{"Scan", Func, 0, "func(a ...any) (n int, err error)"},
+		{"ScanState", Type, 0, ""},
+		{"Scanf", Func, 0, "func(format string, a ...any) (n int, err error)"},
+		{"Scanln", Func, 0, "func(a ...any) (n int, err error)"},
+		{"Scanner", Type, 0, ""},
+		{"Sprint", Func, 0, "func(a ...any) string"},
+		{"Sprintf", Func, 0, "func(format string, a ...any) string"},
+		{"Sprintln", Func, 0, "func(a ...any) string"},
+		{"Sscan", Func, 0, "func(str string, a ...any) (n int, err error)"},
+		{"Sscanf", Func, 0, "func(str string, format string, a ...any) (n int, err error)"},
+		{"Sscanln", Func, 0, "func(str string, a ...any) (n int, err error)"},
+		{"State", Type, 0, ""},
+		{"Stringer", Type, 0, ""},
+	},
+	"go/ast": {
+		{"(*ArrayType).End", Method, 0, ""},
+		{"(*ArrayType).Pos", Method, 0, ""},
+		{"(*AssignStmt).End", Method, 0, ""},
+		{"(*AssignStmt).Pos", Method, 0, ""},
+		{"(*BadDecl).End", Method, 0, ""},
+		{"(*BadDecl).Pos", Method, 0, ""},
+		{"(*BadExpr).End", Method, 0, ""},
+		{"(*BadExpr).Pos", Method, 0, ""},
+		{"(*BadStmt).End", Method, 0, ""},
+		{"(*BadStmt).Pos", Method, 0, ""},
+		{"(*BasicLit).End", Method, 0, ""},
+		{"(*BasicLit).Pos", Method, 0, ""},
+		{"(*BinaryExpr).End", Method, 0, ""},
+		{"(*BinaryExpr).Pos", Method, 0, ""},
+		{"(*BlockStmt).End", Method, 0, ""},
+		{"(*BlockStmt).Pos", Method, 0, ""},
+		{"(*BranchStmt).End", Method, 0, ""},
+		{"(*BranchStmt).Pos", Method, 0, ""},
+		{"(*CallExpr).End", Method, 0, ""},
+		{"(*CallExpr).Pos", Method, 0, ""},
+		{"(*CaseClause).End", Method, 0, ""},
+		{"(*CaseClause).Pos", Method, 0, ""},
+		{"(*ChanType).End", Method, 0, ""},
+		{"(*ChanType).Pos", Method, 0, ""},
+		{"(*CommClause).End", Method, 0, ""},
+		{"(*CommClause).Pos", Method, 0, ""},
+		{"(*Comment).End", Method, 0, ""},
+		{"(*Comment).Pos", Method, 0, ""},
+		{"(*CommentGroup).End", Method, 0, ""},
+		{"(*CommentGroup).Pos", Method, 0, ""},
+		{"(*CommentGroup).Text", Method, 0, ""},
+		{"(*CompositeLit).End", Method, 0, ""},
+		{"(*CompositeLit).Pos", Method, 0, ""},
+		{"(*DeclStmt).End", Method, 0, ""},
+		{"(*DeclStmt).Pos", Method, 0, ""},
+		{"(*DeferStmt).End", Method, 0, ""},
+		{"(*DeferStmt).Pos", Method, 0, ""},
+		{"(*Directive).End", Method, 26, ""},
+		{"(*Directive).ParseArgs", Method, 26, ""},
+		{"(*Directive).Pos", Method, 26, ""},
+		{"(*Ellipsis).End", Method, 0, ""},
+		{"(*Ellipsis).Pos", Method, 0, ""},
+		{"(*EmptyStmt).End", Method, 0, ""},
+		{"(*EmptyStmt).Pos", Method, 0, ""},
+		{"(*ExprStmt).End", Method, 0, ""},
+		{"(*ExprStmt).Pos", Method, 0, ""},
+		{"(*Field).End", Method, 0, ""},
+		{"(*Field).Pos", Method, 0, ""},
+		{"(*FieldList).End", Method, 0, ""},
+		{"(*FieldList).NumFields", Method, 0, ""},
+		{"(*FieldList).Pos", Method, 0, ""},
+		{"(*File).End", Method, 0, ""},
+		{"(*File).Pos", Method, 0, ""},
+		{"(*ForStmt).End", Method, 0, ""},
+		{"(*ForStmt).Pos", Method, 0, ""},
+		{"(*FuncDecl).End", Method, 0, ""},
+		{"(*FuncDecl).Pos", Method, 0, ""},
+		{"(*FuncLit).End", Method, 0, ""},
+		{"(*FuncLit).Pos", Method, 0, ""},
+		{"(*FuncType).End", Method, 0, ""},
+		{"(*FuncType).Pos", Method, 0, ""},
+		{"(*GenDecl).End", Method, 0, ""},
+		{"(*GenDecl).Pos", Method, 0, ""},
+		{"(*GoStmt).End", Method, 0, ""},
+		{"(*GoStmt).Pos", Method, 0, ""},
+		{"(*Ident).End", Method, 0, ""},
+		{"(*Ident).IsExported", Method, 0, ""},
+		{"(*Ident).Pos", Method, 0, ""},
+		{"(*Ident).String", Method, 0, ""},
+		{"(*IfStmt).End", Method, 0, ""},
+		{"(*IfStmt).Pos", Method, 0, ""},
+		{"(*ImportSpec).End", Method, 0, ""},
+		{"(*ImportSpec).Pos", Method, 0, ""},
+		{"(*IncDecStmt).End", Method, 0, ""},
+		{"(*IncDecStmt).Pos", Method, 0, ""},
+		{"(*IndexExpr).End", Method, 0, ""},
+		{"(*IndexExpr).Pos", Method, 0, ""},
+		{"(*IndexListExpr).End", Method, 18, ""},
+		{"(*IndexListExpr).Pos", Method, 18, ""},
+		{"(*InterfaceType).End", Method, 0, ""},
+		{"(*InterfaceType).Pos", Method, 0, ""},
+		{"(*KeyValueExpr).End", Method, 0, ""},
+		{"(*KeyValueExpr).Pos", Method, 0, ""},
+		{"(*LabeledStmt).End", Method, 0, ""},
+		{"(*LabeledStmt).Pos", Method, 0, ""},
+		{"(*MapType).End", Method, 0, ""},
+		{"(*MapType).Pos", Method, 0, ""},
+		{"(*Object).Pos", Method, 0, ""},
+		{"(*Package).End", Method, 0, ""},
+		{"(*Package).Pos", Method, 0, ""},
+		{"(*ParenExpr).End", Method, 0, ""},
+		{"(*ParenExpr).Pos", Method, 0, ""},
+		{"(*RangeStmt).End", Method, 0, ""},
+		{"(*RangeStmt).Pos", Method, 0, ""},
+		{"(*ReturnStmt).End", Method, 0, ""},
+		{"(*ReturnStmt).Pos", Method, 0, ""},
+		{"(*Scope).Insert", Method, 0, ""},
+		{"(*Scope).Lookup", Method, 0, ""},
+		{"(*Scope).String", Method, 0, ""},
+		{"(*SelectStmt).End", Method, 0, ""},
+		{"(*SelectStmt).Pos", Method, 0, ""},
+		{"(*SelectorExpr).End", Method, 0, ""},
+		{"(*SelectorExpr).Pos", Method, 0, ""},
+		{"(*SendStmt).End", Method, 0, ""},
+		{"(*SendStmt).Pos", Method, 0, ""},
+		{"(*SliceExpr).End", Method, 0, ""},
+		{"(*SliceExpr).Pos", Method, 0, ""},
+		{"(*StarExpr).End", Method, 0, ""},
+		{"(*StarExpr).Pos", Method, 0, ""},
+		{"(*StructType).End", Method, 0, ""},
+		{"(*StructType).Pos", Method, 0, ""},
+		{"(*SwitchStmt).End", Method, 0, ""},
+		{"(*SwitchStmt).Pos", Method, 0, ""},
+		{"(*TypeAssertExpr).End", Method, 0, ""},
+		{"(*TypeAssertExpr).Pos", Method, 0, ""},
+		{"(*TypeSpec).End", Method, 0, ""},
+		{"(*TypeSpec).Pos", Method, 0, ""},
+		{"(*TypeSwitchStmt).End", Method, 0, ""},
+		{"(*TypeSwitchStmt).Pos", Method, 0, ""},
+		{"(*UnaryExpr).End", Method, 0, ""},
+		{"(*UnaryExpr).Pos", Method, 0, ""},
+		{"(*ValueSpec).End", Method, 0, ""},
+		{"(*ValueSpec).Pos", Method, 0, ""},
+		{"(CommentMap).Comments", Method, 1, ""},
+		{"(CommentMap).Filter", Method, 1, ""},
+		{"(CommentMap).String", Method, 1, ""},
+		{"(CommentMap).Update", Method, 1, ""},
+		{"(ObjKind).String", Method, 0, ""},
+		{"ArrayType", Type, 0, ""},
+		{"ArrayType.Elt", Field, 0, ""},
+		{"ArrayType.Lbrack", Field, 0, ""},
+		{"ArrayType.Len", Field, 0, ""},
+		{"AssignStmt", Type, 0, ""},
+		{"AssignStmt.Lhs", Field, 0, ""},
+		{"AssignStmt.Rhs", Field, 0, ""},
+		{"AssignStmt.Tok", Field, 0, ""},
+		{"AssignStmt.TokPos", Field, 0, ""},
+		{"Bad", Const, 0, ""},
+		{"BadDecl", Type, 0, ""},
+		{"BadDecl.From", Field, 0, ""},
+		{"BadDecl.To", Field, 0, ""},
+		{"BadExpr", Type, 0, ""},
+		{"BadExpr.From", Field, 0, ""},
+		{"BadExpr.To", Field, 0, ""},
+		{"BadStmt", Type, 0, ""},
+		{"BadStmt.From", Field, 0, ""},
+		{"BadStmt.To", Field, 0, ""},
+		{"BasicLit", Type, 0, ""},
+		{"BasicLit.Kind", Field, 0, ""},
+		{"BasicLit.Value", Field, 0, ""},
+		{"BasicLit.ValuePos", Field, 0, ""},
+		{"BinaryExpr", Type, 0, ""},
+		{"BinaryExpr.Op", Field, 0, ""},
+		{"BinaryExpr.OpPos", Field, 0, ""},
+		{"BinaryExpr.X", Field, 0, ""},
+		{"BinaryExpr.Y", Field, 0, ""},
+		{"BlockStmt", Type, 0, ""},
+		{"BlockStmt.Lbrace", Field, 0, ""},
+		{"BlockStmt.List", Field, 0, ""},
+		{"BlockStmt.Rbrace", Field, 0, ""},
+		{"BranchStmt", Type, 0, ""},
+		{"BranchStmt.Label", Field, 0, ""},
+		{"BranchStmt.Tok", Field, 0, ""},
+		{"BranchStmt.TokPos", Field, 0, ""},
+		{"CallExpr", Type, 0, ""},
+		{"CallExpr.Args", Field, 0, ""},
+		{"CallExpr.Ellipsis", Field, 0, ""},
+		{"CallExpr.Fun", Field, 0, ""},
+		{"CallExpr.Lparen", Field, 0, ""},
+		{"CallExpr.Rparen", Field, 0, ""},
+		{"CaseClause", Type, 0, ""},
+		{"CaseClause.Body", Field, 0, ""},
+		{"CaseClause.Case", Field, 0, ""},
+		{"CaseClause.Colon", Field, 0, ""},
+		{"CaseClause.List", Field, 0, ""},
+		{"ChanDir", Type, 0, ""},
+		{"ChanType", Type, 0, ""},
+		{"ChanType.Arrow", Field, 1, ""},
+		{"ChanType.Begin", Field, 0, ""},
+		{"ChanType.Dir", Field, 0, ""},
+		{"ChanType.Value", Field, 0, ""},
+		{"CommClause", Type, 0, ""},
+		{"CommClause.Body", Field, 0, ""},
+		{"CommClause.Case", Field, 0, ""},
+		{"CommClause.Colon", Field, 0, ""},
+		{"CommClause.Comm", Field, 0, ""},
+		{"Comment", Type, 0, ""},
+		{"Comment.Slash", Field, 0, ""},
+		{"Comment.Text", Field, 0, ""},
+		{"CommentGroup", Type, 0, ""},
+		{"CommentGroup.List", Field, 0, ""},
+		{"CommentMap", Type, 1, ""},
+		{"CompositeLit", Type, 0, ""},
+		{"CompositeLit.Elts", Field, 0, ""},
+		{"CompositeLit.Incomplete", Field, 11, ""},
+		{"CompositeLit.Lbrace", Field, 0, ""},
+		{"CompositeLit.Rbrace", Field, 0, ""},
+		{"CompositeLit.Type", Field, 0, ""},
+		{"Con", Const, 0, ""},
+		{"Decl", Type, 0, ""},
+		{"DeclStmt", Type, 0, ""},
+		{"DeclStmt.Decl", Field, 0, ""},
+		{"DeferStmt", Type, 0, ""},
+		{"DeferStmt.Call", Field, 0, ""},
+		{"DeferStmt.Defer", Field, 0, ""},
+		{"Directive", Type, 26, ""},
+		{"Directive.Args", Field, 26, ""},
+		{"Directive.ArgsPos", Field, 26, ""},
+		{"Directive.Name", Field, 26, ""},
+		{"Directive.Slash", Field, 26, ""},
+		{"Directive.Tool", Field, 26, ""},
+		{"DirectiveArg", Type, 26, ""},
+		{"DirectiveArg.Arg", Field, 26, ""},
+		{"DirectiveArg.Pos", Field, 26, ""},
+		{"Ellipsis", Type, 0, ""},
+		{"Ellipsis.Ellipsis", Field, 0, ""},
+		{"Ellipsis.Elt", Field, 0, ""},
+		{"EmptyStmt", Type, 0, ""},
+		{"EmptyStmt.Implicit", Field, 5, ""},
+		{"EmptyStmt.Semicolon", Field, 0, ""},
+		{"Expr", Type, 0, ""},
+		{"ExprStmt", Type, 0, ""},
+		{"ExprStmt.X", Field, 0, ""},
+		{"Field", Type, 0, ""},
+		{"Field.Comment", Field, 0, ""},
+		{"Field.Doc", Field, 0, ""},
+		{"Field.Names", Field, 0, ""},
+		{"Field.Tag", Field, 0, ""},
+		{"Field.Type", Field, 0, ""},
+		{"FieldFilter", Type, 0, ""},
+		{"FieldList", Type, 0, ""},
+		{"FieldList.Closing", Field, 0, ""},
+		{"FieldList.List", Field, 0, ""},
+		{"FieldList.Opening", Field, 0, ""},
+		{"File", Type, 0, ""},
+		{"File.Comments", Field, 0, ""},
+		{"File.Decls", Field, 0, ""},
+		{"File.Doc", Field, 0, ""},
+		{"File.FileEnd", Field, 20, ""},
+		{"File.FileStart", Field, 20, ""},
+		{"File.GoVersion", Field, 21, ""},
+		{"File.Imports", Field, 0, ""},
+		{"File.Name", Field, 0, ""},
+		{"File.Package", Field, 0, ""},
+		{"File.Scope", Field, 0, ""},
+		{"File.Unresolved", Field, 0, ""},
+		{"FileExports", Func, 0, "func(src *File) bool"},
+		{"Filter", Type, 0, ""},
+		{"FilterDecl", Func, 0, "func(decl Decl, f Filter) bool"},
+		{"FilterFile", Func, 0, "func(src *File, f Filter) bool"},
+		{"FilterFuncDuplicates", Const, 0, ""},
+		{"FilterImportDuplicates", Const, 0, ""},
+		{"FilterPackage", Func, 0, "func(pkg *Package, f Filter) bool"},
+		{"FilterUnassociatedComments", Const, 0, ""},
+		{"ForStmt", Type, 0, ""},
+		{"ForStmt.Body", Field, 0, ""},
+		{"ForStmt.Cond", Field, 0, ""},
+		{"ForStmt.For", Field, 0, ""},
+		{"ForStmt.Init", Field, 0, ""},
+		{"ForStmt.Post", Field, 0, ""},
+		{"Fprint", Func, 0, "func(w io.Writer, fset *token.FileSet, x any, f FieldFilter) error"},
+		{"Fun", Const, 0, ""},
+		{"FuncDecl", Type, 0, ""},
+		{"FuncDecl.Body", Field, 0, ""},
+		{"FuncDecl.Doc", Field, 0, ""},
+		{"FuncDecl.Name", Field, 0, ""},
+		{"FuncDecl.Recv", Field, 0, ""},
+		{"FuncDecl.Type", Field, 0, ""},
+		{"FuncLit", Type, 0, ""},
+		{"FuncLit.Body", Field, 0, ""},
+		{"FuncLit.Type", Field, 0, ""},
+		{"FuncType", Type, 0, ""},
+		{"FuncType.Func", Field, 0, ""},
+		{"FuncType.Params", Field, 0, ""},
+		{"FuncType.Results", Field, 0, ""},
+		{"FuncType.TypeParams", Field, 18, ""},
+		{"GenDecl", Type, 0, ""},
+		{"GenDecl.Doc", Field, 0, ""},
+		{"GenDecl.Lparen", Field, 0, ""},
+		{"GenDecl.Rparen", Field, 0, ""},
+		{"GenDecl.Specs", Field, 0, ""},
+		{"GenDecl.Tok", Field, 0, ""},
+		{"GenDecl.TokPos", Field, 0, ""},
+		{"GoStmt", Type, 0, ""},
+		{"GoStmt.Call", Field, 0, ""},
+		{"GoStmt.Go", Field, 0, ""},
+		{"Ident", Type, 0, ""},
+		{"Ident.Name", Field, 0, ""},
+		{"Ident.NamePos", Field, 0, ""},
+		{"Ident.Obj", Field, 0, ""},
+		{"IfStmt", Type, 0, ""},
+		{"IfStmt.Body", Field, 0, ""},
+		{"IfStmt.Cond", Field, 0, ""},
+		{"IfStmt.Else", Field, 0, ""},
+		{"IfStmt.If", Field, 0, ""},
+		{"IfStmt.Init", Field, 0, ""},
+		{"ImportSpec", Type, 0, ""},
+		{"ImportSpec.Comment", Field, 0, ""},
+		{"ImportSpec.Doc", Field, 0, ""},
+		{"ImportSpec.EndPos", Field, 0, ""},
+		{"ImportSpec.Name", Field, 0, ""},
+		{"ImportSpec.Path", Field, 0, ""},
+		{"Importer", Type, 0, ""},
+		{"IncDecStmt", Type, 0, ""},
+		{"IncDecStmt.Tok", Field, 0, ""},
+		{"IncDecStmt.TokPos", Field, 0, ""},
+		{"IncDecStmt.X", Field, 0, ""},
+		{"IndexExpr", Type, 0, ""},
+		{"IndexExpr.Index", Field, 0, ""},
+		{"IndexExpr.Lbrack", Field, 0, ""},
+		{"IndexExpr.Rbrack", Field, 0, ""},
+		{"IndexExpr.X", Field, 0, ""},
+		{"IndexListExpr", Type, 18, ""},
+		{"IndexListExpr.Indices", Field, 18, ""},
+		{"IndexListExpr.Lbrack", Field, 18, ""},
+		{"IndexListExpr.Rbrack", Field, 18, ""},
+		{"IndexListExpr.X", Field, 18, ""},
+		{"Inspect", Func, 0, "func(node Node, f func(Node) bool)"},
+		{"InterfaceType", Type, 0, ""},
+		{"InterfaceType.Incomplete", Field, 0, ""},
+		{"InterfaceType.Interface", Field, 0, ""},
+		{"InterfaceType.Methods", Field, 0, ""},
+		{"IsExported", Func, 0, "func(name string) bool"},
+		{"IsGenerated", Func, 21, "func(file *File) bool"},
+		{"KeyValueExpr", Type, 0, ""},
+		{"KeyValueExpr.Colon", Field, 0, ""},
+		{"KeyValueExpr.Key", Field, 0, ""},
+		{"KeyValueExpr.Value", Field, 0, ""},
+		{"LabeledStmt", Type, 0, ""},
+		{"LabeledStmt.Colon", Field, 0, ""},
+		{"LabeledStmt.Label", Field, 0, ""},
+		{"LabeledStmt.Stmt", Field, 0, ""},
+		{"Lbl", Const, 0, ""},
+		{"MapType", Type, 0, ""},
+		{"MapType.Key", Field, 0, ""},
+		{"MapType.Map", Field, 0, ""},
+		{"MapType.Value", Field, 0, ""},
+		{"MergeMode", Type, 0, ""},
+		{"MergePackageFiles", Func, 0, "func(pkg *Package, mode MergeMode) *File"},
+		{"NewCommentMap", Func, 1, "func(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap"},
+		{"NewIdent", Func, 0, "func(name string) *Ident"},
+		{"NewObj", Func, 0, "func(kind ObjKind, name string) *Object"},
+		{"NewPackage", Func, 0, "func(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error)"},
+		{"NewScope", Func, 0, "func(outer *Scope) *Scope"},
+		{"Node", Type, 0, ""},
+		{"NotNilFilter", Func, 0, "func(_ string, v reflect.Value) bool"},
+		{"ObjKind", Type, 0, ""},
+		{"Object", Type, 0, ""},
+		{"Object.Data", Field, 0, ""},
+		{"Object.Decl", Field, 0, ""},
+		{"Object.Kind", Field, 0, ""},
+		{"Object.Name", Field, 0, ""},
+		{"Object.Type", Field, 0, ""},
+		{"Package", Type, 0, ""},
+		{"Package.Files", Field, 0, ""},
+		{"Package.Imports", Field, 0, ""},
+		{"Package.Name", Field, 0, ""},
+		{"Package.Scope", Field, 0, ""},
+		{"PackageExports", Func, 0, "func(pkg *Package) bool"},
+		{"ParenExpr", Type, 0, ""},
+		{"ParenExpr.Lparen", Field, 0, ""},
+		{"ParenExpr.Rparen", Field, 0, ""},
+		{"ParenExpr.X", Field, 0, ""},
+		{"ParseDirective", Func, 26, "func(pos token.Pos, c string) (Directive, bool)"},
+		{"Pkg", Const, 0, ""},
+		{"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"},
+		{"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"},
+		{"Print", Func, 0, "func(fset *token.FileSet, x any) error"},
+		{"RECV", Const, 0, ""},
+		{"RangeStmt", Type, 0, ""},
+		{"RangeStmt.Body", Field, 0, ""},
+		{"RangeStmt.For", Field, 0, ""},
+		{"RangeStmt.Key", Field, 0, ""},
+		{"RangeStmt.Range", Field, 20, ""},
+		{"RangeStmt.Tok", Field, 0, ""},
+		{"RangeStmt.TokPos", Field, 0, ""},
+		{"RangeStmt.Value", Field, 0, ""},
+		{"RangeStmt.X", Field, 0, ""},
+		{"ReturnStmt", Type, 0, ""},
+		{"ReturnStmt.Results", Field, 0, ""},
+		{"ReturnStmt.Return", Field, 0, ""},
+		{"SEND", Const, 0, ""},
+		{"Scope", Type, 0, ""},
+		{"Scope.Objects", Field, 0, ""},
+		{"Scope.Outer", Field, 0, ""},
+		{"SelectStmt", Type, 0, ""},
+		{"SelectStmt.Body", Field, 0, ""},
+		{"SelectStmt.Select", Field, 0, ""},
+		{"SelectorExpr", Type, 0, ""},
+		{"SelectorExpr.Sel", Field, 0, ""},
+		{"SelectorExpr.X", Field, 0, ""},
+		{"SendStmt", Type, 0, ""},
+		{"SendStmt.Arrow", Field, 0, ""},
+		{"SendStmt.Chan", Field, 0, ""},
+		{"SendStmt.Value", Field, 0, ""},
+		{"SliceExpr", Type, 0, ""},
+		{"SliceExpr.High", Field, 0, ""},
+		{"SliceExpr.Lbrack", Field, 0, ""},
+		{"SliceExpr.Low", Field, 0, ""},
+		{"SliceExpr.Max", Field, 2, ""},
+		{"SliceExpr.Rbrack", Field, 0, ""},
+		{"SliceExpr.Slice3", Field, 2, ""},
+		{"SliceExpr.X", Field, 0, ""},
+		{"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"},
+		{"Spec", Type, 0, ""},
+		{"StarExpr", Type, 0, ""},
+		{"StarExpr.Star", Field, 0, ""},
+		{"StarExpr.X", Field, 0, ""},
+		{"Stmt", Type, 0, ""},
+		{"StructType", Type, 0, ""},
+		{"StructType.Fields", Field, 0, ""},
+		{"StructType.Incomplete", Field, 0, ""},
+		{"StructType.Struct", Field, 0, ""},
+		{"SwitchStmt", Type, 0, ""},
+		{"SwitchStmt.Body", Field, 0, ""},
+		{"SwitchStmt.Init", Field, 0, ""},
+		{"SwitchStmt.Switch", Field, 0, ""},
+		{"SwitchStmt.Tag", Field, 0, ""},
+		{"Typ", Const, 0, ""},
+		{"TypeAssertExpr", Type, 0, ""},
+		{"TypeAssertExpr.Lparen", Field, 2, ""},
+		{"TypeAssertExpr.Rparen", Field, 2, ""},
+		{"TypeAssertExpr.Type", Field, 0, ""},
+		{"TypeAssertExpr.X", Field, 0, ""},
+		{"TypeSpec", Type, 0, ""},
+		{"TypeSpec.Assign", Field, 9, ""},
+		{"TypeSpec.Comment", Field, 0, ""},
+		{"TypeSpec.Doc", Field, 0, ""},
+		{"TypeSpec.Name", Field, 0, ""},
+		{"TypeSpec.Type", Field, 0, ""},
+		{"TypeSpec.TypeParams", Field, 18, ""},
+		{"TypeSwitchStmt", Type, 0, ""},
+		{"TypeSwitchStmt.Assign", Field, 0, ""},
+		{"TypeSwitchStmt.Body", Field, 0, ""},
+		{"TypeSwitchStmt.Init", Field, 0, ""},
+		{"TypeSwitchStmt.Switch", Field, 0, ""},
+		{"UnaryExpr", Type, 0, ""},
+		{"UnaryExpr.Op", Field, 0, ""},
+		{"UnaryExpr.OpPos", Field, 0, ""},
+		{"UnaryExpr.X", Field, 0, ""},
+		{"Unparen", Func, 22, "func(e Expr) Expr"},
+		{"ValueSpec", Type, 0, ""},
+		{"ValueSpec.Comment", Field, 0, ""},
+		{"ValueSpec.Doc", Field, 0, ""},
+		{"ValueSpec.Names", Field, 0, ""},
+		{"ValueSpec.Type", Field, 0, ""},
+		{"ValueSpec.Values", Field, 0, ""},
+		{"Var", Const, 0, ""},
+		{"Visitor", Type, 0, ""},
+		{"Walk", Func, 0, "func(v Visitor, node Node)"},
+	},
+	"go/build": {
+		{"(*Context).Import", Method, 0, ""},
+		{"(*Context).ImportDir", Method, 0, ""},
+		{"(*Context).MatchFile", Method, 2, ""},
+		{"(*Context).SrcDirs", Method, 0, ""},
+		{"(*MultiplePackageError).Error", Method, 4, ""},
+		{"(*NoGoError).Error", Method, 0, ""},
+		{"(*Package).IsCommand", Method, 0, ""},
+		{"AllowBinary", Const, 0, ""},
+		{"ArchChar", Func, 0, "func(goarch string) (string, error)"},
+		{"Context", Type, 0, ""},
+		{"Context.BuildTags", Field, 0, ""},
+		{"Context.CgoEnabled", Field, 0, ""},
+		{"Context.Compiler", Field, 0, ""},
+		{"Context.Dir", Field, 14, ""},
+		{"Context.GOARCH", Field, 0, ""},
+		{"Context.GOOS", Field, 0, ""},
+		{"Context.GOPATH", Field, 0, ""},
+		{"Context.GOROOT", Field, 0, ""},
+		{"Context.HasSubdir", Field, 0, ""},
+		{"Context.InstallSuffix", Field, 1, ""},
+		{"Context.IsAbsPath", Field, 0, ""},
+		{"Context.IsDir", Field, 0, ""},
+		{"Context.JoinPath", Field, 0, ""},
+		{"Context.OpenFile", Field, 0, ""},
+		{"Context.ReadDir", Field, 0, ""},
+		{"Context.ReleaseTags", Field, 1, ""},
+		{"Context.SplitPathList", Field, 0, ""},
+		{"Context.ToolTags", Field, 17, ""},
+		{"Context.UseAllFiles", Field, 0, ""},
+		{"Default", Var, 0, ""},
+		{"Directive", Type, 21, ""},
+		{"Directive.Pos", Field, 21, ""},
+		{"Directive.Text", Field, 21, ""},
+		{"FindOnly", Const, 0, ""},
+		{"IgnoreVendor", Const, 6, ""},
+		{"Import", Func, 0, "func(path string, srcDir string, mode ImportMode) (*Package, error)"},
+		{"ImportComment", Const, 4, ""},
+		{"ImportDir", Func, 0, "func(dir string, mode ImportMode) (*Package, error)"},
+		{"ImportMode", Type, 0, ""},
+		{"IsLocalImport", Func, 0, "func(path string) bool"},
+		{"MultiplePackageError", Type, 4, ""},
+		{"MultiplePackageError.Dir", Field, 4, ""},
+		{"MultiplePackageError.Files", Field, 4, ""},
+		{"MultiplePackageError.Packages", Field, 4, ""},
+		{"NoGoError", Type, 0, ""},
+		{"NoGoError.Dir", Field, 0, ""},
+		{"Package", Type, 0, ""},
+		{"Package.AllTags", Field, 2, ""},
+		{"Package.BinDir", Field, 0, ""},
+		{"Package.BinaryOnly", Field, 7, ""},
+		{"Package.CFiles", Field, 0, ""},
+		{"Package.CXXFiles", Field, 2, ""},
+		{"Package.CgoCFLAGS", Field, 0, ""},
+		{"Package.CgoCPPFLAGS", Field, 2, ""},
+		{"Package.CgoCXXFLAGS", Field, 2, ""},
+		{"Package.CgoFFLAGS", Field, 7, ""},
+		{"Package.CgoFiles", Field, 0, ""},
+		{"Package.CgoLDFLAGS", Field, 0, ""},
+		{"Package.CgoPkgConfig", Field, 0, ""},
+		{"Package.ConflictDir", Field, 2, ""},
+		{"Package.Dir", Field, 0, ""},
+		{"Package.Directives", Field, 21, ""},
+		{"Package.Doc", Field, 0, ""},
+		{"Package.EmbedPatternPos", Field, 16, ""},
+		{"Package.EmbedPatterns", Field, 16, ""},
+		{"Package.FFiles", Field, 7, ""},
+		{"Package.GoFiles", Field, 0, ""},
+		{"Package.Goroot", Field, 0, ""},
+		{"Package.HFiles", Field, 0, ""},
+		{"Package.IgnoredGoFiles", Field, 1, ""},
+		{"Package.IgnoredOtherFiles", Field, 16, ""},
+		{"Package.ImportComment", Field, 4, ""},
+		{"Package.ImportPath", Field, 0, ""},
+		{"Package.ImportPos", Field, 0, ""},
+		{"Package.Imports", Field, 0, ""},
+		{"Package.InvalidGoFiles", Field, 6, ""},
+		{"Package.MFiles", Field, 3, ""},
+		{"Package.Name", Field, 0, ""},
+		{"Package.PkgObj", Field, 0, ""},
+		{"Package.PkgRoot", Field, 0, ""},
+		{"Package.PkgTargetRoot", Field, 5, ""},
+		{"Package.Root", Field, 0, ""},
+		{"Package.SFiles", Field, 0, ""},
+		{"Package.SrcRoot", Field, 0, ""},
+		{"Package.SwigCXXFiles", Field, 1, ""},
+		{"Package.SwigFiles", Field, 1, ""},
+		{"Package.SysoFiles", Field, 0, ""},
+		{"Package.TestDirectives", Field, 21, ""},
+		{"Package.TestEmbedPatternPos", Field, 16, ""},
+		{"Package.TestEmbedPatterns", Field, 16, ""},
+		{"Package.TestGoFiles", Field, 0, ""},
+		{"Package.TestImportPos", Field, 0, ""},
+		{"Package.TestImports", Field, 0, ""},
+		{"Package.XTestDirectives", Field, 21, ""},
+		{"Package.XTestEmbedPatternPos", Field, 16, ""},
+		{"Package.XTestEmbedPatterns", Field, 16, ""},
+		{"Package.XTestGoFiles", Field, 0, ""},
+		{"Package.XTestImportPos", Field, 0, ""},
+		{"Package.XTestImports", Field, 0, ""},
+		{"ToolDir", Var, 0, ""},
+	},
+	"go/build/constraint": {
+		{"(*AndExpr).Eval", Method, 16, ""},
+		{"(*AndExpr).String", Method, 16, ""},
+		{"(*NotExpr).Eval", Method, 16, ""},
+		{"(*NotExpr).String", Method, 16, ""},
+		{"(*OrExpr).Eval", Method, 16, ""},
+		{"(*OrExpr).String", Method, 16, ""},
+		{"(*SyntaxError).Error", Method, 16, ""},
+		{"(*TagExpr).Eval", Method, 16, ""},
+		{"(*TagExpr).String", Method, 16, ""},
+		{"AndExpr", Type, 16, ""},
+		{"AndExpr.X", Field, 16, ""},
+		{"AndExpr.Y", Field, 16, ""},
+		{"Expr", Type, 16, ""},
+		{"GoVersion", Func, 21, "func(x Expr) string"},
+		{"IsGoBuild", Func, 16, "func(line string) bool"},
+		{"IsPlusBuild", Func, 16, "func(line string) bool"},
+		{"NotExpr", Type, 16, ""},
+		{"NotExpr.X", Field, 16, ""},
+		{"OrExpr", Type, 16, ""},
+		{"OrExpr.X", Field, 16, ""},
+		{"OrExpr.Y", Field, 16, ""},
+		{"Parse", Func, 16, "func(line string) (Expr, error)"},
+		{"PlusBuildLines", Func, 16, "func(x Expr) ([]string, error)"},
+		{"SyntaxError", Type, 16, ""},
+		{"SyntaxError.Err", Field, 16, ""},
+		{"SyntaxError.Offset", Field, 16, ""},
+		{"TagExpr", Type, 16, ""},
+		{"TagExpr.Tag", Field, 16, ""},
+	},
+	"go/constant": {
+		{"(Kind).String", Method, 18, ""},
+		{"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"},
+		{"BitLen", Func, 5, "func(x Value) int"},
+		{"Bool", Const, 5, ""},
+		{"BoolVal", Func, 5, "func(x Value) bool"},
+		{"Bytes", Func, 5, "func(x Value) []byte"},
+		{"Compare", Func, 5, "func(x_ Value, op token.Token, y_ Value) bool"},
+		{"Complex", Const, 5, ""},
+		{"Denom", Func, 5, "func(x Value) Value"},
+		{"Float", Const, 5, ""},
+		{"Float32Val", Func, 5, "func(x Value) (float32, bool)"},
+		{"Float64Val", Func, 5, "func(x Value) (float64, bool)"},
+		{"Imag", Func, 5, "func(x Value) Value"},
+		{"Int", Const, 5, ""},
+		{"Int64Val", Func, 5, "func(x Value) (int64, bool)"},
+		{"Kind", Type, 5, ""},
+		{"Make", Func, 13, "func(x any) Value"},
+		{"MakeBool", Func, 5, "func(b bool) Value"},
+		{"MakeFloat64", Func, 5, "func(x float64) Value"},
+		{"MakeFromBytes", Func, 5, "func(bytes []byte) Value"},
+		{"MakeFromLiteral", Func, 5, "func(lit string, tok token.Token, zero uint) Value"},
+		{"MakeImag", Func, 5, "func(x Value) Value"},
+		{"MakeInt64", Func, 5, "func(x int64) Value"},
+		{"MakeString", Func, 5, "func(s string) Value"},
+		{"MakeUint64", Func, 5, "func(x uint64) Value"},
+		{"MakeUnknown", Func, 5, "func() Value"},
+		{"Num", Func, 5, "func(x Value) Value"},
+		{"Real", Func, 5, "func(x Value) Value"},
+		{"Shift", Func, 5, "func(x Value, op token.Token, s uint) Value"},
+		{"Sign", Func, 5, "func(x Value) int"},
+		{"String", Const, 5, ""},
+		{"StringVal", Func, 5, "func(x Value) string"},
+		{"ToComplex", Func, 6, "func(x Value) Value"},
+		{"ToFloat", Func, 6, "func(x Value) Value"},
+		{"ToInt", Func, 6, "func(x Value) Value"},
+		{"Uint64Val", Func, 5, "func(x Value) (uint64, bool)"},
+		{"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"},
+		{"Unknown", Const, 5, ""},
+		{"Val", Func, 13, "func(x Value) any"},
+		{"Value", Type, 5, ""},
+	},
+	"go/doc": {
+		{"(*Package).Filter", Method, 0, ""},
+		{"(*Package).HTML", Method, 19, ""},
+		{"(*Package).Markdown", Method, 19, ""},
+		{"(*Package).Parser", Method, 19, ""},
+		{"(*Package).Printer", Method, 19, ""},
+		{"(*Package).Synopsis", Method, 19, ""},
+		{"(*Package).Text", Method, 19, ""},
+		{"AllDecls", Const, 0, ""},
+		{"AllMethods", Const, 0, ""},
+		{"Example", Type, 0, ""},
+		{"Example.Code", Field, 0, ""},
+		{"Example.Comments", Field, 0, ""},
+		{"Example.Doc", Field, 0, ""},
+		{"Example.EmptyOutput", Field, 1, ""},
+		{"Example.Name", Field, 0, ""},
+		{"Example.Order", Field, 1, ""},
+		{"Example.Output", Field, 0, ""},
+		{"Example.Play", Field, 1, ""},
+		{"Example.Suffix", Field, 14, ""},
+		{"Example.Unordered", Field, 7, ""},
+		{"Examples", Func, 0, "func(testFiles ...*ast.File) []*Example"},
+		{"Filter", Type, 0, ""},
+		{"Func", Type, 0, ""},
+		{"Func.Decl", Field, 0, ""},
+		{"Func.Doc", Field, 0, ""},
+		{"Func.Examples", Field, 14, ""},
+		{"Func.Level", Field, 0, ""},
+		{"Func.Name", Field, 0, ""},
+		{"Func.Orig", Field, 0, ""},
+		{"Func.Recv", Field, 0, ""},
+		{"IllegalPrefixes", Var, 1, ""},
+		{"IsPredeclared", Func, 8, "func(s string) bool"},
+		{"Mode", Type, 0, ""},
+		{"New", Func, 0, "func(pkg *ast.Package, importPath string, mode Mode) *Package"},
+		{"NewFromFiles", Func, 14, "func(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error)"},
+		{"Note", Type, 1, ""},
+		{"Note.Body", Field, 1, ""},
+		{"Note.End", Field, 1, ""},
+		{"Note.Pos", Field, 1, ""},
+		{"Note.UID", Field, 1, ""},
+		{"Package", Type, 0, ""},
+		{"Package.Bugs", Field, 0, ""},
+		{"Package.Consts", Field, 0, ""},
+		{"Package.Doc", Field, 0, ""},
+		{"Package.Examples", Field, 14, ""},
+		{"Package.Filenames", Field, 0, ""},
+		{"Package.Funcs", Field, 0, ""},
+		{"Package.ImportPath", Field, 0, ""},
+		{"Package.Imports", Field, 0, ""},
+		{"Package.Name", Field, 0, ""},
+		{"Package.Notes", Field, 1, ""},
+		{"Package.Types", Field, 0, ""},
+		{"Package.Vars", Field, 0, ""},
+		{"PreserveAST", Const, 12, ""},
+		{"Synopsis", Func, 0, "func(text string) string"},
+		{"ToHTML", Func, 0, "func(w io.Writer, text string, words map[string]string)"},
+		{"ToText", Func, 0, "func(w io.Writer, text string, prefix string, codePrefix string, width int)"},
+		{"Type", Type, 0, ""},
+		{"Type.Consts", Field, 0, ""},
+		{"Type.Decl", Field, 0, ""},
+		{"Type.Doc", Field, 0, ""},
+		{"Type.Examples", Field, 14, ""},
+		{"Type.Funcs", Field, 0, ""},
+		{"Type.Methods", Field, 0, ""},
+		{"Type.Name", Field, 0, ""},
+		{"Type.Vars", Field, 0, ""},
+		{"Value", Type, 0, ""},
+		{"Value.Decl", Field, 0, ""},
+		{"Value.Doc", Field, 0, ""},
+		{"Value.Names", Field, 0, ""},
+	},
+	"go/doc/comment": {
+		{"(*DocLink).DefaultURL", Method, 19, ""},
+		{"(*Heading).DefaultID", Method, 19, ""},
+		{"(*List).BlankBefore", Method, 19, ""},
+		{"(*List).BlankBetween", Method, 19, ""},
+		{"(*Parser).Parse", Method, 19, ""},
+		{"(*Printer).Comment", Method, 19, ""},
+		{"(*Printer).HTML", Method, 19, ""},
+		{"(*Printer).Markdown", Method, 19, ""},
+		{"(*Printer).Text", Method, 19, ""},
+		{"Block", Type, 19, ""},
+		{"Code", Type, 19, ""},
+		{"Code.Text", Field, 19, ""},
+		{"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"},
+		{"Doc", Type, 19, ""},
+		{"Doc.Content", Field, 19, ""},
+		{"Doc.Links", Field, 19, ""},
+		{"DocLink", Type, 19, ""},
+		{"DocLink.ImportPath", Field, 19, ""},
+		{"DocLink.Name", Field, 19, ""},
+		{"DocLink.Recv", Field, 19, ""},
+		{"DocLink.Text", Field, 19, ""},
+		{"Heading", Type, 19, ""},
+		{"Heading.Text", Field, 19, ""},
+		{"Italic", Type, 19, ""},
+		{"Link", Type, 19, ""},
+		{"Link.Auto", Field, 19, ""},
+		{"Link.Text", Field, 19, ""},
+		{"Link.URL", Field, 19, ""},
+		{"LinkDef", Type, 19, ""},
+		{"LinkDef.Text", Field, 19, ""},
+		{"LinkDef.URL", Field, 19, ""},
+		{"LinkDef.Used", Field, 19, ""},
+		{"List", Type, 19, ""},
+		{"List.ForceBlankBefore", Field, 19, ""},
+		{"List.ForceBlankBetween", Field, 19, ""},
+		{"List.Items", Field, 19, ""},
+		{"ListItem", Type, 19, ""},
+		{"ListItem.Content", Field, 19, ""},
+		{"ListItem.Number", Field, 19, ""},
+		{"Paragraph", Type, 19, ""},
+		{"Paragraph.Text", Field, 19, ""},
+		{"Parser", Type, 19, ""},
+		{"Parser.LookupPackage", Field, 19, ""},
+		{"Parser.LookupSym", Field, 19, ""},
+		{"Parser.Words", Field, 19, ""},
+		{"Plain", Type, 19, ""},
+		{"Printer", Type, 19, ""},
+		{"Printer.DocLinkBaseURL", Field, 19, ""},
+		{"Printer.DocLinkURL", Field, 19, ""},
+		{"Printer.HeadingID", Field, 19, ""},
+		{"Printer.HeadingLevel", Field, 19, ""},
+		{"Printer.TextCodePrefix", Field, 19, ""},
+		{"Printer.TextPrefix", Field, 19, ""},
+		{"Printer.TextWidth", Field, 19, ""},
+		{"Text", Type, 19, ""},
+	},
+	"go/format": {
+		{"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"},
+		{"Source", Func, 1, "func(src []byte) ([]byte, error)"},
+	},
+	"go/importer": {
+		{"Default", Func, 5, "func() types.Importer"},
+		{"For", Func, 5, "func(compiler string, lookup Lookup) types.Importer"},
+		{"ForCompiler", Func, 12, "func(fset *token.FileSet, compiler string, lookup Lookup) types.Importer"},
+		{"Lookup", Type, 5, ""},
+	},
+	"go/parser": {
+		{"AllErrors", Const, 1, ""},
+		{"DeclarationErrors", Const, 0, ""},
+		{"ImportsOnly", Const, 0, ""},
+		{"Mode", Type, 0, ""},
+		{"PackageClauseOnly", Const, 0, ""},
+		{"ParseComments", Const, 0, ""},
+		{"ParseDir", Func, 0, "func(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error)"},
+		{"ParseExpr", Func, 0, "func(x string) (ast.Expr, error)"},
+		{"ParseExprFrom", Func, 5, "func(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error)"},
+		{"ParseFile", Func, 0, "func(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error)"},
+		{"SkipObjectResolution", Const, 17, ""},
+		{"SpuriousErrors", Const, 0, ""},
+		{"Trace", Const, 0, ""},
+	},
+	"go/printer": {
+		{"(*Config).Fprint", Method, 0, ""},
+		{"CommentedNode", Type, 0, ""},
+		{"CommentedNode.Comments", Field, 0, ""},
+		{"CommentedNode.Node", Field, 0, ""},
+		{"Config", Type, 0, ""},
+		{"Config.Indent", Field, 1, ""},
+		{"Config.Mode", Field, 0, ""},
+		{"Config.Tabwidth", Field, 0, ""},
+		{"Fprint", Func, 0, "func(output io.Writer, fset *token.FileSet, node any) error"},
+		{"Mode", Type, 0, ""},
+		{"RawFormat", Const, 0, ""},
+		{"SourcePos", Const, 0, ""},
+		{"TabIndent", Const, 0, ""},
+		{"UseSpaces", Const, 0, ""},
+	},
+	"go/scanner": {
+		{"(*ErrorList).Add", Method, 0, ""},
+		{"(*ErrorList).RemoveMultiples", Method, 0, ""},
+		{"(*ErrorList).Reset", Method, 0, ""},
+		{"(*Scanner).Init", Method, 0, ""},
+		{"(*Scanner).Scan", Method, 0, ""},
+		{"(Error).Error", Method, 0, ""},
+		{"(ErrorList).Err", Method, 0, ""},
+		{"(ErrorList).Error", Method, 0, ""},
+		{"(ErrorList).Len", Method, 0, ""},
+		{"(ErrorList).Less", Method, 0, ""},
+		{"(ErrorList).Sort", Method, 0, ""},
+		{"(ErrorList).Swap", Method, 0, ""},
+		{"Error", Type, 0, ""},
+		{"Error.Msg", Field, 0, ""},
+		{"Error.Pos", Field, 0, ""},
+		{"ErrorHandler", Type, 0, ""},
+		{"ErrorList", Type, 0, ""},
+		{"Mode", Type, 0, ""},
+		{"PrintError", Func, 0, "func(w io.Writer, err error)"},
+		{"ScanComments", Const, 0, ""},
+		{"Scanner", Type, 0, ""},
+		{"Scanner.ErrorCount", Field, 0, ""},
+	},
+	"go/token": {
+		{"(*File).AddLine", Method, 0, ""},
+		{"(*File).AddLineColumnInfo", Method, 11, ""},
+		{"(*File).AddLineInfo", Method, 0, ""},
+		{"(*File).Base", Method, 0, ""},
+		{"(*File).Line", Method, 0, ""},
+		{"(*File).LineCount", Method, 0, ""},
+		{"(*File).LineStart", Method, 12, ""},
+		{"(*File).Lines", Method, 21, ""},
+		{"(*File).MergeLine", Method, 2, ""},
+		{"(*File).Name", Method, 0, ""},
+		{"(*File).Offset", Method, 0, ""},
+		{"(*File).Pos", Method, 0, ""},
+		{"(*File).Position", Method, 0, ""},
+		{"(*File).PositionFor", Method, 4, ""},
+		{"(*File).SetLines", Method, 0, ""},
+		{"(*File).SetLinesForContent", Method, 0, ""},
+		{"(*File).Size", Method, 0, ""},
+		{"(*FileSet).AddExistingFiles", Method, 25, ""},
+		{"(*FileSet).AddFile", Method, 0, ""},
+		{"(*FileSet).Base", Method, 0, ""},
+		{"(*FileSet).File", Method, 0, ""},
+		{"(*FileSet).Iterate", Method, 0, ""},
+		{"(*FileSet).Position", Method, 0, ""},
+		{"(*FileSet).PositionFor", Method, 4, ""},
+		{"(*FileSet).Read", Method, 0, ""},
+		{"(*FileSet).RemoveFile", Method, 20, ""},
+		{"(*FileSet).Write", Method, 0, ""},
+		{"(*Position).IsValid", Method, 0, ""},
+		{"(Pos).IsValid", Method, 0, ""},
+		{"(Position).String", Method, 0, ""},
+		{"(Token).IsKeyword", Method, 0, ""},
+		{"(Token).IsLiteral", Method, 0, ""},
+		{"(Token).IsOperator", Method, 0, ""},
+		{"(Token).Precedence", Method, 0, ""},
+		{"(Token).String", Method, 0, ""},
+		{"ADD", Const, 0, ""},
+		{"ADD_ASSIGN", Const, 0, ""},
+		{"AND", Const, 0, ""},
+		{"AND_ASSIGN", Const, 0, ""},
+		{"AND_NOT", Const, 0, ""},
+		{"AND_NOT_ASSIGN", Const, 0, ""},
+		{"ARROW", Const, 0, ""},
+		{"ASSIGN", Const, 0, ""},
+		{"BREAK", Const, 0, ""},
+		{"CASE", Const, 0, ""},
+		{"CHAN", Const, 0, ""},
+		{"CHAR", Const, 0, ""},
+		{"COLON", Const, 0, ""},
+		{"COMMA", Const, 0, ""},
+		{"COMMENT", Const, 0, ""},
+		{"CONST", Const, 0, ""},
+		{"CONTINUE", Const, 0, ""},
+		{"DEC", Const, 0, ""},
+		{"DEFAULT", Const, 0, ""},
+		{"DEFER", Const, 0, ""},
+		{"DEFINE", Const, 0, ""},
+		{"ELLIPSIS", Const, 0, ""},
+		{"ELSE", Const, 0, ""},
+		{"EOF", Const, 0, ""},
+		{"EQL", Const, 0, ""},
+		{"FALLTHROUGH", Const, 0, ""},
+		{"FLOAT", Const, 0, ""},
+		{"FOR", Const, 0, ""},
+		{"FUNC", Const, 0, ""},
+		{"File", Type, 0, ""},
+		{"FileSet", Type, 0, ""},
+		{"GEQ", Const, 0, ""},
+		{"GO", Const, 0, ""},
+		{"GOTO", Const, 0, ""},
+		{"GTR", Const, 0, ""},
+		{"HighestPrec", Const, 0, ""},
+		{"IDENT", Const, 0, ""},
+		{"IF", Const, 0, ""},
+		{"ILLEGAL", Const, 0, ""},
+		{"IMAG", Const, 0, ""},
+		{"IMPORT", Const, 0, ""},
+		{"INC", Const, 0, ""},
+		{"INT", Const, 0, ""},
+		{"INTERFACE", Const, 0, ""},
+		{"IsExported", Func, 13, "func(name string) bool"},
+		{"IsIdentifier", Func, 13, "func(name string) bool"},
+		{"IsKeyword", Func, 13, "func(name string) bool"},
+		{"LAND", Const, 0, ""},
+		{"LBRACE", Const, 0, ""},
+		{"LBRACK", Const, 0, ""},
+		{"LEQ", Const, 0, ""},
+		{"LOR", Const, 0, ""},
+		{"LPAREN", Const, 0, ""},
+		{"LSS", Const, 0, ""},
+		{"Lookup", Func, 0, "func(ident string) Token"},
+		{"LowestPrec", Const, 0, ""},
+		{"MAP", Const, 0, ""},
+		{"MUL", Const, 0, ""},
+		{"MUL_ASSIGN", Const, 0, ""},
+		{"NEQ", Const, 0, ""},
+		{"NOT", Const, 0, ""},
+		{"NewFileSet", Func, 0, "func() *FileSet"},
+		{"NoPos", Const, 0, ""},
+		{"OR", Const, 0, ""},
+		{"OR_ASSIGN", Const, 0, ""},
+		{"PACKAGE", Const, 0, ""},
+		{"PERIOD", Const, 0, ""},
+		{"Pos", Type, 0, ""},
+		{"Position", Type, 0, ""},
+		{"Position.Column", Field, 0, ""},
+		{"Position.Filename", Field, 0, ""},
+		{"Position.Line", Field, 0, ""},
+		{"Position.Offset", Field, 0, ""},
+		{"QUO", Const, 0, ""},
+		{"QUO_ASSIGN", Const, 0, ""},
+		{"RANGE", Const, 0, ""},
+		{"RBRACE", Const, 0, ""},
+		{"RBRACK", Const, 0, ""},
+		{"REM", Const, 0, ""},
+		{"REM_ASSIGN", Const, 0, ""},
+		{"RETURN", Const, 0, ""},
+		{"RPAREN", Const, 0, ""},
+		{"SELECT", Const, 0, ""},
+		{"SEMICOLON", Const, 0, ""},
+		{"SHL", Const, 0, ""},
+		{"SHL_ASSIGN", Const, 0, ""},
+		{"SHR", Const, 0, ""},
+		{"SHR_ASSIGN", Const, 0, ""},
+		{"STRING", Const, 0, ""},
+		{"STRUCT", Const, 0, ""},
+		{"SUB", Const, 0, ""},
+		{"SUB_ASSIGN", Const, 0, ""},
+		{"SWITCH", Const, 0, ""},
+		{"TILDE", Const, 18, ""},
+		{"TYPE", Const, 0, ""},
+		{"Token", Type, 0, ""},
+		{"UnaryPrec", Const, 0, ""},
+		{"VAR", Const, 0, ""},
+		{"XOR", Const, 0, ""},
+		{"XOR_ASSIGN", Const, 0, ""},
+	},
+	"go/types": {
+		{"(*Alias).Obj", Method, 22, ""},
+		{"(*Alias).Origin", Method, 23, ""},
+		{"(*Alias).Rhs", Method, 23, ""},
+		{"(*Alias).SetTypeParams", Method, 23, ""},
+		{"(*Alias).String", Method, 22, ""},
+		{"(*Alias).TypeArgs", Method, 23, ""},
+		{"(*Alias).TypeParams", Method, 23, ""},
+		{"(*Alias).Underlying", Method, 22, ""},
+		{"(*ArgumentError).Error", Method, 18, ""},
+		{"(*ArgumentError).Unwrap", Method, 18, ""},
+		{"(*Array).Elem", Method, 5, ""},
+		{"(*Array).Len", Method, 5, ""},
+		{"(*Array).String", Method, 5, ""},
+		{"(*Array).Underlying", Method, 5, ""},
+		{"(*Basic).Info", Method, 5, ""},
+		{"(*Basic).Kind", Method, 5, ""},
+		{"(*Basic).Name", Method, 5, ""},
+		{"(*Basic).String", Method, 5, ""},
+		{"(*Basic).Underlying", Method, 5, ""},
+		{"(*Builtin).Exported", Method, 5, ""},
+		{"(*Builtin).Id", Method, 5, ""},
+		{"(*Builtin).Name", Method, 5, ""},
+		{"(*Builtin).Parent", Method, 5, ""},
+		{"(*Builtin).Pkg", Method, 5, ""},
+		{"(*Builtin).Pos", Method, 5, ""},
+		{"(*Builtin).String", Method, 5, ""},
+		{"(*Builtin).Type", Method, 5, ""},
+		{"(*Chan).Dir", Method, 5, ""},
+		{"(*Chan).Elem", Method, 5, ""},
+		{"(*Chan).String", Method, 5, ""},
+		{"(*Chan).Underlying", Method, 5, ""},
+		{"(*Checker).Files", Method, 5, ""},
+		{"(*Config).Check", Method, 5, ""},
+		{"(*Const).Exported", Method, 5, ""},
+		{"(*Const).Id", Method, 5, ""},
+		{"(*Const).Name", Method, 5, ""},
+		{"(*Const).Parent", Method, 5, ""},
+		{"(*Const).Pkg", Method, 5, ""},
+		{"(*Const).Pos", Method, 5, ""},
+		{"(*Const).String", Method, 5, ""},
+		{"(*Const).Type", Method, 5, ""},
+		{"(*Const).Val", Method, 5, ""},
+		{"(*Func).Exported", Method, 5, ""},
+		{"(*Func).FullName", Method, 5, ""},
+		{"(*Func).Id", Method, 5, ""},
+		{"(*Func).Name", Method, 5, ""},
+		{"(*Func).Origin", Method, 19, ""},
+		{"(*Func).Parent", Method, 5, ""},
+		{"(*Func).Pkg", Method, 5, ""},
+		{"(*Func).Pos", Method, 5, ""},
+		{"(*Func).Scope", Method, 5, ""},
+		{"(*Func).Signature", Method, 23, ""},
+		{"(*Func).String", Method, 5, ""},
+		{"(*Func).Type", Method, 5, ""},
+		{"(*Info).ObjectOf", Method, 5, ""},
+		{"(*Info).PkgNameOf", Method, 22, ""},
+		{"(*Info).TypeOf", Method, 5, ""},
+		{"(*Initializer).String", Method, 5, ""},
+		{"(*Interface).Complete", Method, 5, ""},
+		{"(*Interface).Embedded", Method, 5, ""},
+		{"(*Interface).EmbeddedType", Method, 11, ""},
+		{"(*Interface).EmbeddedTypes", Method, 24, ""},
+		{"(*Interface).Empty", Method, 5, ""},
+		{"(*Interface).ExplicitMethod", Method, 5, ""},
+		{"(*Interface).ExplicitMethods", Method, 24, ""},
+		{"(*Interface).IsComparable", Method, 18, ""},
+		{"(*Interface).IsImplicit", Method, 18, ""},
+		{"(*Interface).IsMethodSet", Method, 18, ""},
+		{"(*Interface).MarkImplicit", Method, 18, ""},
+		{"(*Interface).Method", Method, 5, ""},
+		{"(*Interface).Methods", Method, 24, ""},
+		{"(*Interface).NumEmbeddeds", Method, 5, ""},
+		{"(*Interface).NumExplicitMethods", Method, 5, ""},
+		{"(*Interface).NumMethods", Method, 5, ""},
+		{"(*Interface).String", Method, 5, ""},
+		{"(*Interface).Underlying", Method, 5, ""},
+		{"(*Label).Exported", Method, 5, ""},
+		{"(*Label).Id", Method, 5, ""},
+		{"(*Label).Name", Method, 5, ""},
+		{"(*Label).Parent", Method, 5, ""},
+		{"(*Label).Pkg", Method, 5, ""},
+		{"(*Label).Pos", Method, 5, ""},
+		{"(*Label).String", Method, 5, ""},
+		{"(*Label).Type", Method, 5, ""},
+		{"(*Map).Elem", Method, 5, ""},
+		{"(*Map).Key", Method, 5, ""},
+		{"(*Map).String", Method, 5, ""},
+		{"(*Map).Underlying", Method, 5, ""},
+		{"(*MethodSet).At", Method, 5, ""},
+		{"(*MethodSet).Len", Method, 5, ""},
+		{"(*MethodSet).Lookup", Method, 5, ""},
+		{"(*MethodSet).Methods", Method, 24, ""},
+		{"(*MethodSet).String", Method, 5, ""},
+		{"(*Named).AddMethod", Method, 5, ""},
+		{"(*Named).Method", Method, 5, ""},
+		{"(*Named).Methods", Method, 24, ""},
+		{"(*Named).NumMethods", Method, 5, ""},
+		{"(*Named).Obj", Method, 5, ""},
+		{"(*Named).Origin", Method, 18, ""},
+		{"(*Named).SetTypeParams", Method, 18, ""},
+		{"(*Named).SetUnderlying", Method, 5, ""},
+		{"(*Named).String", Method, 5, ""},
+		{"(*Named).TypeArgs", Method, 18, ""},
+		{"(*Named).TypeParams", Method, 18, ""},
+		{"(*Named).Underlying", Method, 5, ""},
+		{"(*Nil).Exported", Method, 5, ""},
+		{"(*Nil).Id", Method, 5, ""},
+		{"(*Nil).Name", Method, 5, ""},
+		{"(*Nil).Parent", Method, 5, ""},
+		{"(*Nil).Pkg", Method, 5, ""},
+		{"(*Nil).Pos", Method, 5, ""},
+		{"(*Nil).String", Method, 5, ""},
+		{"(*Nil).Type", Method, 5, ""},
+		{"(*Package).Complete", Method, 5, ""},
+		{"(*Package).GoVersion", Method, 21, ""},
+		{"(*Package).Imports", Method, 5, ""},
+		{"(*Package).MarkComplete", Method, 5, ""},
+		{"(*Package).Name", Method, 5, ""},
+		{"(*Package).Path", Method, 5, ""},
+		{"(*Package).Scope", Method, 5, ""},
+		{"(*Package).SetImports", Method, 5, ""},
+		{"(*Package).SetName", Method, 6, ""},
+		{"(*Package).String", Method, 5, ""},
+		{"(*PkgName).Exported", Method, 5, ""},
+		{"(*PkgName).Id", Method, 5, ""},
+		{"(*PkgName).Imported", Method, 5, ""},
+		{"(*PkgName).Name", Method, 5, ""},
+		{"(*PkgName).Parent", Method, 5, ""},
+		{"(*PkgName).Pkg", Method, 5, ""},
+		{"(*PkgName).Pos", Method, 5, ""},
+		{"(*PkgName).String", Method, 5, ""},
+		{"(*PkgName).Type", Method, 5, ""},
+		{"(*Pointer).Elem", Method, 5, ""},
+		{"(*Pointer).String", Method, 5, ""},
+		{"(*Pointer).Underlying", Method, 5, ""},
+		{"(*Scope).Child", Method, 5, ""},
+		{"(*Scope).Children", Method, 24, ""},
+		{"(*Scope).Contains", Method, 5, ""},
+		{"(*Scope).End", Method, 5, ""},
+		{"(*Scope).Innermost", Method, 5, ""},
+		{"(*Scope).Insert", Method, 5, ""},
+		{"(*Scope).Len", Method, 5, ""},
+		{"(*Scope).Lookup", Method, 5, ""},
+		{"(*Scope).LookupParent", Method, 5, ""},
+		{"(*Scope).Names", Method, 5, ""},
+		{"(*Scope).NumChildren", Method, 5, ""},
+		{"(*Scope).Parent", Method, 5, ""},
+		{"(*Scope).Pos", Method, 5, ""},
+		{"(*Scope).String", Method, 5, ""},
+		{"(*Scope).WriteTo", Method, 5, ""},
+		{"(*Selection).Index", Method, 5, ""},
+		{"(*Selection).Indirect", Method, 5, ""},
+		{"(*Selection).Kind", Method, 5, ""},
+		{"(*Selection).Obj", Method, 5, ""},
+		{"(*Selection).Recv", Method, 5, ""},
+		{"(*Selection).String", Method, 5, ""},
+		{"(*Selection).Type", Method, 5, ""},
+		{"(*Signature).Params", Method, 5, ""},
+		{"(*Signature).Recv", Method, 5, ""},
+		{"(*Signature).RecvTypeParams", Method, 18, ""},
+		{"(*Signature).Results", Method, 5, ""},
+		{"(*Signature).String", Method, 5, ""},
+		{"(*Signature).TypeParams", Method, 18, ""},
+		{"(*Signature).Underlying", Method, 5, ""},
+		{"(*Signature).Variadic", Method, 5, ""},
+		{"(*Slice).Elem", Method, 5, ""},
+		{"(*Slice).String", Method, 5, ""},
+		{"(*Slice).Underlying", Method, 5, ""},
+		{"(*StdSizes).Alignof", Method, 5, ""},
+		{"(*StdSizes).Offsetsof", Method, 5, ""},
+		{"(*StdSizes).Sizeof", Method, 5, ""},
+		{"(*Struct).Field", Method, 5, ""},
+		{"(*Struct).Fields", Method, 24, ""},
+		{"(*Struct).NumFields", Method, 5, ""},
+		{"(*Struct).String", Method, 5, ""},
+		{"(*Struct).Tag", Method, 5, ""},
+		{"(*Struct).Underlying", Method, 5, ""},
+		{"(*Term).String", Method, 18, ""},
+		{"(*Term).Tilde", Method, 18, ""},
+		{"(*Term).Type", Method, 18, ""},
+		{"(*Tuple).At", Method, 5, ""},
+		{"(*Tuple).Len", Method, 5, ""},
+		{"(*Tuple).String", Method, 5, ""},
+		{"(*Tuple).Underlying", Method, 5, ""},
+		{"(*Tuple).Variables", Method, 24, ""},
+		{"(*TypeList).At", Method, 18, ""},
+		{"(*TypeList).Len", Method, 18, ""},
+		{"(*TypeList).Types", Method, 24, ""},
+		{"(*TypeName).Exported", Method, 5, ""},
+		{"(*TypeName).Id", Method, 5, ""},
+		{"(*TypeName).IsAlias", Method, 9, ""},
+		{"(*TypeName).Name", Method, 5, ""},
+		{"(*TypeName).Parent", Method, 5, ""},
+		{"(*TypeName).Pkg", Method, 5, ""},
+		{"(*TypeName).Pos", Method, 5, ""},
+		{"(*TypeName).String", Method, 5, ""},
+		{"(*TypeName).Type", Method, 5, ""},
+		{"(*TypeParam).Constraint", Method, 18, ""},
+		{"(*TypeParam).Index", Method, 18, ""},
+		{"(*TypeParam).Obj", Method, 18, ""},
+		{"(*TypeParam).SetConstraint", Method, 18, ""},
+		{"(*TypeParam).String", Method, 18, ""},
+		{"(*TypeParam).Underlying", Method, 18, ""},
+		{"(*TypeParamList).At", Method, 18, ""},
+		{"(*TypeParamList).Len", Method, 18, ""},
+		{"(*TypeParamList).TypeParams", Method, 24, ""},
+		{"(*Union).Len", Method, 18, ""},
+		{"(*Union).String", Method, 18, ""},
+		{"(*Union).Term", Method, 18, ""},
+		{"(*Union).Terms", Method, 24, ""},
+		{"(*Union).Underlying", Method, 18, ""},
+		{"(*Var).Anonymous", Method, 5, ""},
+		{"(*Var).Embedded", Method, 11, ""},
+		{"(*Var).Exported", Method, 5, ""},
+		{"(*Var).Id", Method, 5, ""},
+		{"(*Var).IsField", Method, 5, ""},
+		{"(*Var).Kind", Method, 25, ""},
+		{"(*Var).Name", Method, 5, ""},
+		{"(*Var).Origin", Method, 19, ""},
+		{"(*Var).Parent", Method, 5, ""},
+		{"(*Var).Pkg", Method, 5, ""},
+		{"(*Var).Pos", Method, 5, ""},
+		{"(*Var).SetKind", Method, 25, ""},
+		{"(*Var).String", Method, 5, ""},
+		{"(*Var).Type", Method, 5, ""},
+		{"(Checker).ObjectOf", Method, 5, ""},
+		{"(Checker).PkgNameOf", Method, 22, ""},
+		{"(Checker).TypeOf", Method, 5, ""},
+		{"(Error).Error", Method, 5, ""},
+		{"(TypeAndValue).Addressable", Method, 5, ""},
+		{"(TypeAndValue).Assignable", Method, 5, ""},
+		{"(TypeAndValue).HasOk", Method, 5, ""},
+		{"(TypeAndValue).IsBuiltin", Method, 5, ""},
+		{"(TypeAndValue).IsNil", Method, 5, ""},
+		{"(TypeAndValue).IsType", Method, 5, ""},
+		{"(TypeAndValue).IsValue", Method, 5, ""},
+		{"(TypeAndValue).IsVoid", Method, 5, ""},
+		{"(VarKind).String", Method, 25, ""},
+		{"Alias", Type, 22, ""},
+		{"ArgumentError", Type, 18, ""},
+		{"ArgumentError.Err", Field, 18, ""},
+		{"ArgumentError.Index", Field, 18, ""},
+		{"Array", Type, 5, ""},
+		{"AssertableTo", Func, 5, "func(V *Interface, T Type) bool"},
+		{"AssignableTo", Func, 5, "func(V Type, T Type) bool"},
+		{"Basic", Type, 5, ""},
+		{"BasicInfo", Type, 5, ""},
+		{"BasicKind", Type, 5, ""},
+		{"Bool", Const, 5, ""},
+		{"Builtin", Type, 5, ""},
+		{"Byte", Const, 5, ""},
+		{"Chan", Type, 5, ""},
+		{"ChanDir", Type, 5, ""},
+		{"CheckExpr", Func, 13, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr ast.Expr, info *Info) (err error)"},
+		{"Checker", Type, 5, ""},
+		{"Checker.Info", Field, 5, ""},
+		{"Comparable", Func, 5, "func(T Type) bool"},
+		{"Complex128", Const, 5, ""},
+		{"Complex64", Const, 5, ""},
+		{"Config", Type, 5, ""},
+		{"Config.Context", Field, 18, ""},
+		{"Config.DisableUnusedImportCheck", Field, 5, ""},
+		{"Config.Error", Field, 5, ""},
+		{"Config.FakeImportC", Field, 5, ""},
+		{"Config.GoVersion", Field, 18, ""},
+		{"Config.IgnoreFuncBodies", Field, 5, ""},
+		{"Config.Importer", Field, 5, ""},
+		{"Config.Sizes", Field, 5, ""},
+		{"Const", Type, 5, ""},
+		{"Context", Type, 18, ""},
+		{"ConvertibleTo", Func, 5, "func(V Type, T Type) bool"},
+		{"DefPredeclaredTestFuncs", Func, 5, "func()"},
+		{"Default", Func, 8, "func(t Type) Type"},
+		{"Error", Type, 5, ""},
+		{"Error.Fset", Field, 5, ""},
+		{"Error.Msg", Field, 5, ""},
+		{"Error.Pos", Field, 5, ""},
+		{"Error.Soft", Field, 5, ""},
+		{"Eval", Func, 5, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error)"},
+		{"ExprString", Func, 5, "func(x ast.Expr) string"},
+		{"FieldVal", Const, 5, ""},
+		{"FieldVar", Const, 25, ""},
+		{"Float32", Const, 5, ""},
+		{"Float64", Const, 5, ""},
+		{"Func", Type, 5, ""},
+		{"Id", Func, 5, "func(pkg *Package, name string) string"},
+		{"Identical", Func, 5, "func(x Type, y Type) bool"},
+		{"IdenticalIgnoreTags", Func, 8, "func(x Type, y Type) bool"},
+		{"Implements", Func, 5, "func(V Type, T *Interface) bool"},
+		{"ImportMode", Type, 6, ""},
+		{"Importer", Type, 5, ""},
+		{"ImporterFrom", Type, 6, ""},
+		{"Info", Type, 5, ""},
+		{"Info.Defs", Field, 5, ""},
+		{"Info.FileVersions", Field, 22, ""},
+		{"Info.Implicits", Field, 5, ""},
+		{"Info.InitOrder", Field, 5, ""},
+		{"Info.Instances", Field, 18, ""},
+		{"Info.Scopes", Field, 5, ""},
+		{"Info.Selections", Field, 5, ""},
+		{"Info.Types", Field, 5, ""},
+		{"Info.Uses", Field, 5, ""},
+		{"Initializer", Type, 5, ""},
+		{"Initializer.Lhs", Field, 5, ""},
+		{"Initializer.Rhs", Field, 5, ""},
+		{"Instance", Type, 18, ""},
+		{"Instance.Type", Field, 18, ""},
+		{"Instance.TypeArgs", Field, 18, ""},
+		{"Instantiate", Func, 18, "func(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error)"},
+		{"Int", Const, 5, ""},
+		{"Int16", Const, 5, ""},
+		{"Int32", Const, 5, ""},
+		{"Int64", Const, 5, ""},
+		{"Int8", Const, 5, ""},
+		{"Interface", Type, 5, ""},
+		{"Invalid", Const, 5, ""},
+		{"IsBoolean", Const, 5, ""},
+		{"IsComplex", Const, 5, ""},
+		{"IsConstType", Const, 5, ""},
+		{"IsFloat", Const, 5, ""},
+		{"IsInteger", Const, 5, ""},
+		{"IsInterface", Func, 5, "func(t Type) bool"},
+		{"IsNumeric", Const, 5, ""},
+		{"IsOrdered", Const, 5, ""},
+		{"IsString", Const, 5, ""},
+		{"IsUnsigned", Const, 5, ""},
+		{"IsUntyped", Const, 5, ""},
+		{"Label", Type, 5, ""},
+		{"LocalVar", Const, 25, ""},
+		{"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"},
+		{"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"},
+		{"Map", Type, 5, ""},
+		{"MethodExpr", Const, 5, ""},
+		{"MethodSet", Type, 5, ""},
+		{"MethodVal", Const, 5, ""},
+		{"MissingMethod", Func, 5, "func(V Type, T *Interface, static bool) (method *Func, wrongType bool)"},
+		{"Named", Type, 5, ""},
+		{"NewAlias", Func, 22, "func(obj *TypeName, rhs Type) *Alias"},
+		{"NewArray", Func, 5, "func(elem Type, len int64) *Array"},
+		{"NewChan", Func, 5, "func(dir ChanDir, elem Type) *Chan"},
+		{"NewChecker", Func, 5, "func(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker"},
+		{"NewConst", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const"},
+		{"NewContext", Func, 18, "func() *Context"},
+		{"NewField", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) *Var"},
+		{"NewFunc", Func, 5, "func(pos token.Pos, pkg *Package, name string, sig *Signature) *Func"},
+		{"NewInterface", Func, 5, "func(methods []*Func, embeddeds []*Named) *Interface"},
+		{"NewInterfaceType", Func, 11, "func(methods []*Func, embeddeds []Type) *Interface"},
+		{"NewLabel", Func, 5, "func(pos token.Pos, pkg *Package, name string) *Label"},
+		{"NewMap", Func, 5, "func(key Type, elem Type) *Map"},
+		{"NewMethodSet", Func, 5, "func(T Type) *MethodSet"},
+		{"NewNamed", Func, 5, "func(obj *TypeName, underlying Type, methods []*Func) *Named"},
+		{"NewPackage", Func, 5, "func(path string, name string) *Package"},
+		{"NewParam", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
+		{"NewPkgName", Func, 5, "func(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName"},
+		{"NewPointer", Func, 5, "func(elem Type) *Pointer"},
+		{"NewScope", Func, 5, "func(parent *Scope, pos token.Pos, end token.Pos, comment string) *Scope"},
+		{"NewSignature", Func, 5, "func(recv *Var, params *Tuple, results *Tuple, variadic bool) *Signature"},
+		{"NewSignatureType", Func, 18, "func(recv *Var, recvTypeParams []*TypeParam, typeParams []*TypeParam, params *Tuple, results *Tuple, variadic bool) *Signature"},
+		{"NewSlice", Func, 5, "func(elem Type) *Slice"},
+		{"NewStruct", Func, 5, "func(fields []*Var, tags []string) *Struct"},
+		{"NewTerm", Func, 18, "func(tilde bool, typ Type) *Term"},
+		{"NewTuple", Func, 5, "func(x ...*Var) *Tuple"},
+		{"NewTypeName", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *TypeName"},
+		{"NewTypeParam", Func, 18, "func(obj *TypeName, constraint Type) *TypeParam"},
+		{"NewUnion", Func, 18, "func(terms []*Term) *Union"},
+		{"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
+		{"Nil", Type, 5, ""},
+		{"Object", Type, 5, ""},
+		{"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"},
+		{"Package", Type, 5, ""},
+		{"PackageVar", Const, 25, ""},
+		{"ParamVar", Const, 25, ""},
+		{"PkgName", Type, 5, ""},
+		{"Pointer", Type, 5, ""},
+		{"Qualifier", Type, 5, ""},
+		{"RecvOnly", Const, 5, ""},
+		{"RecvVar", Const, 25, ""},
+		{"RelativeTo", Func, 5, "func(pkg *Package) Qualifier"},
+		{"ResultVar", Const, 25, ""},
+		{"Rune", Const, 5, ""},
+		{"Satisfies", Func, 20, "func(V Type, T *Interface) bool"},
+		{"Scope", Type, 5, ""},
+		{"Selection", Type, 5, ""},
+		{"SelectionKind", Type, 5, ""},
+		{"SelectionString", Func, 5, "func(s *Selection, qf Qualifier) string"},
+		{"SendOnly", Const, 5, ""},
+		{"SendRecv", Const, 5, ""},
+		{"Signature", Type, 5, ""},
+		{"Sizes", Type, 5, ""},
+		{"SizesFor", Func, 9, "func(compiler string, arch string) Sizes"},
+		{"Slice", Type, 5, ""},
+		{"StdSizes", Type, 5, ""},
+		{"StdSizes.MaxAlign", Field, 5, ""},
+		{"StdSizes.WordSize", Field, 5, ""},
+		{"String", Const, 5, ""},
+		{"Struct", Type, 5, ""},
+		{"Term", Type, 18, ""},
+		{"Tuple", Type, 5, ""},
+		{"Typ", Var, 5, ""},
+		{"Type", Type, 5, ""},
+		{"TypeAndValue", Type, 5, ""},
+		{"TypeAndValue.Type", Field, 5, ""},
+		{"TypeAndValue.Value", Field, 5, ""},
+		{"TypeList", Type, 18, ""},
+		{"TypeName", Type, 5, ""},
+		{"TypeParam", Type, 18, ""},
+		{"TypeParamList", Type, 18, ""},
+		{"TypeString", Func, 5, "func(typ Type, qf Qualifier) string"},
+		{"Uint", Const, 5, ""},
+		{"Uint16", Const, 5, ""},
+		{"Uint32", Const, 5, ""},
+		{"Uint64", Const, 5, ""},
+		{"Uint8", Const, 5, ""},
+		{"Uintptr", Const, 5, ""},
+		{"Unalias", Func, 22, "func(t Type) Type"},
+		{"Union", Type, 18, ""},
+		{"Universe", Var, 5, ""},
+		{"Unsafe", Var, 5, ""},
+		{"UnsafePointer", Const, 5, ""},
+		{"UntypedBool", Const, 5, ""},
+		{"UntypedComplex", Const, 5, ""},
+		{"UntypedFloat", Const, 5, ""},
+		{"UntypedInt", Const, 5, ""},
+		{"UntypedNil", Const, 5, ""},
+		{"UntypedRune", Const, 5, ""},
+		{"UntypedString", Const, 5, ""},
+		{"Var", Type, 5, ""},
+		{"VarKind", Type, 25, ""},
+		{"WriteExpr", Func, 5, "func(buf *bytes.Buffer, x ast.Expr)"},
+		{"WriteSignature", Func, 5, "func(buf *bytes.Buffer, sig *Signature, qf Qualifier)"},
+		{"WriteType", Func, 5, "func(buf *bytes.Buffer, typ Type, qf Qualifier)"},
+	},
+	"go/version": {
+		{"Compare", Func, 22, "func(x string, y string) int"},
+		{"IsValid", Func, 22, "func(x string) bool"},
+		{"Lang", Func, 22, "func(x string) string"},
+	},
+	"hash": {
+		{"Cloner", Type, 25, ""},
+		{"Hash", Type, 0, ""},
+		{"Hash32", Type, 0, ""},
+		{"Hash64", Type, 0, ""},
+		{"XOF", Type, 25, ""},
+	},
+	"hash/adler32": {
+		{"Checksum", Func, 0, "func(data []byte) uint32"},
+		{"New", Func, 0, "func() hash.Hash32"},
+		{"Size", Const, 0, ""},
+	},
+	"hash/crc32": {
+		{"Castagnoli", Const, 0, ""},
+		{"Checksum", Func, 0, "func(data []byte, tab *Table) uint32"},
+		{"ChecksumIEEE", Func, 0, "func(data []byte) uint32"},
+		{"IEEE", Const, 0, ""},
+		{"IEEETable", Var, 0, ""},
+		{"Koopman", Const, 0, ""},
+		{"MakeTable", Func, 0, "func(poly uint32) *Table"},
+		{"New", Func, 0, "func(tab *Table) hash.Hash32"},
+		{"NewIEEE", Func, 0, "func() hash.Hash32"},
+		{"Size", Const, 0, ""},
+		{"Table", Type, 0, ""},
+		{"Update", Func, 0, "func(crc uint32, tab *Table, p []byte) uint32"},
+	},
+	"hash/crc64": {
+		{"Checksum", Func, 0, "func(data []byte, tab *Table) uint64"},
+		{"ECMA", Const, 0, ""},
+		{"ISO", Const, 0, ""},
+		{"MakeTable", Func, 0, "func(poly uint64) *Table"},
+		{"New", Func, 0, "func(tab *Table) hash.Hash64"},
+		{"Size", Const, 0, ""},
+		{"Table", Type, 0, ""},
+		{"Update", Func, 0, "func(crc uint64, tab *Table, p []byte) uint64"},
+	},
+	"hash/fnv": {
+		{"New128", Func, 9, "func() hash.Hash"},
+		{"New128a", Func, 9, "func() hash.Hash"},
+		{"New32", Func, 0, "func() hash.Hash32"},
+		{"New32a", Func, 0, "func() hash.Hash32"},
+		{"New64", Func, 0, "func() hash.Hash64"},
+		{"New64a", Func, 0, "func() hash.Hash64"},
+	},
+	"hash/maphash": {
+		{"(*Hash).BlockSize", Method, 14, ""},
+		{"(*Hash).Clone", Method, 25, ""},
+		{"(*Hash).Reset", Method, 14, ""},
+		{"(*Hash).Seed", Method, 14, ""},
+		{"(*Hash).SetSeed", Method, 14, ""},
+		{"(*Hash).Size", Method, 14, ""},
+		{"(*Hash).Sum", Method, 14, ""},
+		{"(*Hash).Sum64", Method, 14, ""},
+		{"(*Hash).Write", Method, 14, ""},
+		{"(*Hash).WriteByte", Method, 14, ""},
+		{"(*Hash).WriteString", Method, 14, ""},
+		{"Bytes", Func, 19, "func(seed Seed, b []byte) uint64"},
+		{"Comparable", Func, 24, "func[T comparable](seed Seed, v T) uint64"},
+		{"Hash", Type, 14, ""},
+		{"MakeSeed", Func, 14, "func() Seed"},
+		{"Seed", Type, 14, ""},
+		{"String", Func, 19, "func(seed Seed, s string) uint64"},
+		{"WriteComparable", Func, 24, "func[T comparable](h *Hash, x T)"},
+	},
+	"html": {
+		{"EscapeString", Func, 0, "func(s string) string"},
+		{"UnescapeString", Func, 0, "func(s string) string"},
+	},
+	"html/template": {
+		{"(*Error).Error", Method, 0, ""},
+		{"(*Template).AddParseTree", Method, 0, ""},
+		{"(*Template).Clone", Method, 0, ""},
+		{"(*Template).DefinedTemplates", Method, 6, ""},
+		{"(*Template).Delims", Method, 0, ""},
+		{"(*Template).Execute", Method, 0, ""},
+		{"(*Template).ExecuteTemplate", Method, 0, ""},
+		{"(*Template).Funcs", Method, 0, ""},
+		{"(*Template).Lookup", Method, 0, ""},
+		{"(*Template).Name", Method, 0, ""},
+		{"(*Template).New", Method, 0, ""},
+		{"(*Template).Option", Method, 5, ""},
+		{"(*Template).Parse", Method, 0, ""},
+		{"(*Template).ParseFS", Method, 16, ""},
+		{"(*Template).ParseFiles", Method, 0, ""},
+		{"(*Template).ParseGlob", Method, 0, ""},
+		{"(*Template).Templates", Method, 0, ""},
+		{"CSS", Type, 0, ""},
+		{"ErrAmbigContext", Const, 0, ""},
+		{"ErrBadHTML", Const, 0, ""},
+		{"ErrBranchEnd", Const, 0, ""},
+		{"ErrEndContext", Const, 0, ""},
+		{"ErrJSTemplate", Const, 21, ""},
+		{"ErrNoSuchTemplate", Const, 0, ""},
+		{"ErrOutputContext", Const, 0, ""},
+		{"ErrPartialCharset", Const, 0, ""},
+		{"ErrPartialEscape", Const, 0, ""},
+		{"ErrPredefinedEscaper", Const, 9, ""},
+		{"ErrRangeLoopReentry", Const, 0, ""},
+		{"ErrSlashAmbig", Const, 0, ""},
+		{"Error", Type, 0, ""},
+		{"Error.Description", Field, 0, ""},
+		{"Error.ErrorCode", Field, 0, ""},
+		{"Error.Line", Field, 0, ""},
+		{"Error.Name", Field, 0, ""},
+		{"Error.Node", Field, 4, ""},
+		{"ErrorCode", Type, 0, ""},
+		{"FuncMap", Type, 0, ""},
+		{"HTML", Type, 0, ""},
+		{"HTMLAttr", Type, 0, ""},
+		{"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
+		{"HTMLEscapeString", Func, 0, "func(s string) string"},
+		{"HTMLEscaper", Func, 0, "func(args ...any) string"},
+		{"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
+		{"JS", Type, 0, ""},
+		{"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
+		{"JSEscapeString", Func, 0, "func(s string) string"},
+		{"JSEscaper", Func, 0, "func(args ...any) string"},
+		{"JSStr", Type, 0, ""},
+		{"Must", Func, 0, "func(t *Template, err error) *Template"},
+		{"New", Func, 0, "func(name string) *Template"},
+		{"OK", Const, 0, ""},
+		{"ParseFS", Func, 16, "func(fs fs.FS, patterns ...string) (*Template, error)"},
+		{"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
+		{"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
+		{"Srcset", Type, 10, ""},
+		{"Template", Type, 0, ""},
+		{"Template.Tree", Field, 2, ""},
+		{"URL", Type, 0, ""},
+		{"URLQueryEscaper", Func, 0, "func(args ...any) string"},
+	},
+	"image": {
+		{"(*Alpha).AlphaAt", Method, 4, ""},
+		{"(*Alpha).At", Method, 0, ""},
+		{"(*Alpha).Bounds", Method, 0, ""},
+		{"(*Alpha).ColorModel", Method, 0, ""},
+		{"(*Alpha).Opaque", Method, 0, ""},
+		{"(*Alpha).PixOffset", Method, 0, ""},
+		{"(*Alpha).RGBA64At", Method, 17, ""},
+		{"(*Alpha).Set", Method, 0, ""},
+		{"(*Alpha).SetAlpha", Method, 0, ""},
+		{"(*Alpha).SetRGBA64", Method, 17, ""},
+		{"(*Alpha).SubImage", Method, 0, ""},
+		{"(*Alpha16).Alpha16At", Method, 4, ""},
+		{"(*Alpha16).At", Method, 0, ""},
+		{"(*Alpha16).Bounds", Method, 0, ""},
+		{"(*Alpha16).ColorModel", Method, 0, ""},
+		{"(*Alpha16).Opaque", Method, 0, ""},
+		{"(*Alpha16).PixOffset", Method, 0, ""},
+		{"(*Alpha16).RGBA64At", Method, 17, ""},
+		{"(*Alpha16).Set", Method, 0, ""},
+		{"(*Alpha16).SetAlpha16", Method, 0, ""},
+		{"(*Alpha16).SetRGBA64", Method, 17, ""},
+		{"(*Alpha16).SubImage", Method, 0, ""},
+		{"(*CMYK).At", Method, 5, ""},
+		{"(*CMYK).Bounds", Method, 5, ""},
+		{"(*CMYK).CMYKAt", Method, 5, ""},
+		{"(*CMYK).ColorModel", Method, 5, ""},
+		{"(*CMYK).Opaque", Method, 5, ""},
+		{"(*CMYK).PixOffset", Method, 5, ""},
+		{"(*CMYK).RGBA64At", Method, 17, ""},
+		{"(*CMYK).Set", Method, 5, ""},
+		{"(*CMYK).SetCMYK", Method, 5, ""},
+		{"(*CMYK).SetRGBA64", Method, 17, ""},
+		{"(*CMYK).SubImage", Method, 5, ""},
+		{"(*Gray).At", Method, 0, ""},
+		{"(*Gray).Bounds", Method, 0, ""},
+		{"(*Gray).ColorModel", Method, 0, ""},
+		{"(*Gray).GrayAt", Method, 4, ""},
+		{"(*Gray).Opaque", Method, 0, ""},
+		{"(*Gray).PixOffset", Method, 0, ""},
+		{"(*Gray).RGBA64At", Method, 17, ""},
+		{"(*Gray).Set", Method, 0, ""},
+		{"(*Gray).SetGray", Method, 0, ""},
+		{"(*Gray).SetRGBA64", Method, 17, ""},
+		{"(*Gray).SubImage", Method, 0, ""},
+		{"(*Gray16).At", Method, 0, ""},
+		{"(*Gray16).Bounds", Method, 0, ""},
+		{"(*Gray16).ColorModel", Method, 0, ""},
+		{"(*Gray16).Gray16At", Method, 4, ""},
+		{"(*Gray16).Opaque", Method, 0, ""},
+		{"(*Gray16).PixOffset", Method, 0, ""},
+		{"(*Gray16).RGBA64At", Method, 17, ""},
+		{"(*Gray16).Set", Method, 0, ""},
+		{"(*Gray16).SetGray16", Method, 0, ""},
+		{"(*Gray16).SetRGBA64", Method, 17, ""},
+		{"(*Gray16).SubImage", Method, 0, ""},
+		{"(*NRGBA).At", Method, 0, ""},
+		{"(*NRGBA).Bounds", Method, 0, ""},
+		{"(*NRGBA).ColorModel", Method, 0, ""},
+		{"(*NRGBA).NRGBAAt", Method, 4, ""},
+		{"(*NRGBA).Opaque", Method, 0, ""},
+		{"(*NRGBA).PixOffset", Method, 0, ""},
+		{"(*NRGBA).RGBA64At", Method, 17, ""},
+		{"(*NRGBA).Set", Method, 0, ""},
+		{"(*NRGBA).SetNRGBA", Method, 0, ""},
+		{"(*NRGBA).SetRGBA64", Method, 17, ""},
+		{"(*NRGBA).SubImage", Method, 0, ""},
+		{"(*NRGBA64).At", Method, 0, ""},
+		{"(*NRGBA64).Bounds", Method, 0, ""},
+		{"(*NRGBA64).ColorModel", Method, 0, ""},
+		{"(*NRGBA64).NRGBA64At", Method, 4, ""},
+		{"(*NRGBA64).Opaque", Method, 0, ""},
+		{"(*NRGBA64).PixOffset", Method, 0, ""},
+		{"(*NRGBA64).RGBA64At", Method, 17, ""},
+		{"(*NRGBA64).Set", Method, 0, ""},
+		{"(*NRGBA64).SetNRGBA64", Method, 0, ""},
+		{"(*NRGBA64).SetRGBA64", Method, 17, ""},
+		{"(*NRGBA64).SubImage", Method, 0, ""},
+		{"(*NYCbCrA).AOffset", Method, 6, ""},
+		{"(*NYCbCrA).At", Method, 6, ""},
+		{"(*NYCbCrA).Bounds", Method, 6, ""},
+		{"(*NYCbCrA).COffset", Method, 6, ""},
+		{"(*NYCbCrA).ColorModel", Method, 6, ""},
+		{"(*NYCbCrA).NYCbCrAAt", Method, 6, ""},
+		{"(*NYCbCrA).Opaque", Method, 6, ""},
+		{"(*NYCbCrA).RGBA64At", Method, 17, ""},
+		{"(*NYCbCrA).SubImage", Method, 6, ""},
+		{"(*NYCbCrA).YCbCrAt", Method, 6, ""},
+		{"(*NYCbCrA).YOffset", Method, 6, ""},
+		{"(*Paletted).At", Method, 0, ""},
+		{"(*Paletted).Bounds", Method, 0, ""},
+		{"(*Paletted).ColorIndexAt", Method, 0, ""},
+		{"(*Paletted).ColorModel", Method, 0, ""},
+		{"(*Paletted).Opaque", Method, 0, ""},
+		{"(*Paletted).PixOffset", Method, 0, ""},
+		{"(*Paletted).RGBA64At", Method, 17, ""},
+		{"(*Paletted).Set", Method, 0, ""},
+		{"(*Paletted).SetColorIndex", Method, 0, ""},
+		{"(*Paletted).SetRGBA64", Method, 17, ""},
+		{"(*Paletted).SubImage", Method, 0, ""},
+		{"(*RGBA).At", Method, 0, ""},
+		{"(*RGBA).Bounds", Method, 0, ""},
+		{"(*RGBA).ColorModel", Method, 0, ""},
+		{"(*RGBA).Opaque", Method, 0, ""},
+		{"(*RGBA).PixOffset", Method, 0, ""},
+		{"(*RGBA).RGBA64At", Method, 17, ""},
+		{"(*RGBA).RGBAAt", Method, 4, ""},
+		{"(*RGBA).Set", Method, 0, ""},
+		{"(*RGBA).SetRGBA", Method, 0, ""},
+		{"(*RGBA).SetRGBA64", Method, 17, ""},
+		{"(*RGBA).SubImage", Method, 0, ""},
+		{"(*RGBA64).At", Method, 0, ""},
+		{"(*RGBA64).Bounds", Method, 0, ""},
+		{"(*RGBA64).ColorModel", Method, 0, ""},
+		{"(*RGBA64).Opaque", Method, 0, ""},
+		{"(*RGBA64).PixOffset", Method, 0, ""},
+		{"(*RGBA64).RGBA64At", Method, 4, ""},
+		{"(*RGBA64).Set", Method, 0, ""},
+		{"(*RGBA64).SetRGBA64", Method, 0, ""},
+		{"(*RGBA64).SubImage", Method, 0, ""},
+		{"(*Uniform).At", Method, 0, ""},
+		{"(*Uniform).Bounds", Method, 0, ""},
+		{"(*Uniform).ColorModel", Method, 0, ""},
+		{"(*Uniform).Convert", Method, 0, ""},
+		{"(*Uniform).Opaque", Method, 0, ""},
+		{"(*Uniform).RGBA", Method, 0, ""},
+		{"(*Uniform).RGBA64At", Method, 17, ""},
+		{"(*YCbCr).At", Method, 0, ""},
+		{"(*YCbCr).Bounds", Method, 0, ""},
+		{"(*YCbCr).COffset", Method, 0, ""},
+		{"(*YCbCr).ColorModel", Method, 0, ""},
+		{"(*YCbCr).Opaque", Method, 0, ""},
+		{"(*YCbCr).RGBA64At", Method, 17, ""},
+		{"(*YCbCr).SubImage", Method, 0, ""},
+		{"(*YCbCr).YCbCrAt", Method, 4, ""},
+		{"(*YCbCr).YOffset", Method, 0, ""},
+		{"(Point).Add", Method, 0, ""},
+		{"(Point).Div", Method, 0, ""},
+		{"(Point).Eq", Method, 0, ""},
+		{"(Point).In", Method, 0, ""},
+		{"(Point).Mod", Method, 0, ""},
+		{"(Point).Mul", Method, 0, ""},
+		{"(Point).String", Method, 0, ""},
+		{"(Point).Sub", Method, 0, ""},
+		{"(Rectangle).Add", Method, 0, ""},
+		{"(Rectangle).At", Method, 5, ""},
+		{"(Rectangle).Bounds", Method, 5, ""},
+		{"(Rectangle).Canon", Method, 0, ""},
+		{"(Rectangle).ColorModel", Method, 5, ""},
+		{"(Rectangle).Dx", Method, 0, ""},
+		{"(Rectangle).Dy", Method, 0, ""},
+		{"(Rectangle).Empty", Method, 0, ""},
+		{"(Rectangle).Eq", Method, 0, ""},
+		{"(Rectangle).In", Method, 0, ""},
+		{"(Rectangle).Inset", Method, 0, ""},
+		{"(Rectangle).Intersect", Method, 0, ""},
+		{"(Rectangle).Overlaps", Method, 0, ""},
+		{"(Rectangle).RGBA64At", Method, 17, ""},
+		{"(Rectangle).Size", Method, 0, ""},
+		{"(Rectangle).String", Method, 0, ""},
+		{"(Rectangle).Sub", Method, 0, ""},
+		{"(Rectangle).Union", Method, 0, ""},
+		{"(YCbCrSubsampleRatio).String", Method, 0, ""},
+		{"Alpha", Type, 0, ""},
+		{"Alpha.Pix", Field, 0, ""},
+		{"Alpha.Rect", Field, 0, ""},
+		{"Alpha.Stride", Field, 0, ""},
+		{"Alpha16", Type, 0, ""},
+		{"Alpha16.Pix", Field, 0, ""},
+		{"Alpha16.Rect", Field, 0, ""},
+		{"Alpha16.Stride", Field, 0, ""},
+		{"Black", Var, 0, ""},
+		{"CMYK", Type, 5, ""},
+		{"CMYK.Pix", Field, 5, ""},
+		{"CMYK.Rect", Field, 5, ""},
+		{"CMYK.Stride", Field, 5, ""},
+		{"Config", Type, 0, ""},
+		{"Config.ColorModel", Field, 0, ""},
+		{"Config.Height", Field, 0, ""},
+		{"Config.Width", Field, 0, ""},
+		{"Decode", Func, 0, "func(r io.Reader) (Image, string, error)"},
+		{"DecodeConfig", Func, 0, "func(r io.Reader) (Config, string, error)"},
+		{"ErrFormat", Var, 0, ""},
+		{"Gray", Type, 0, ""},
+		{"Gray.Pix", Field, 0, ""},
+		{"Gray.Rect", Field, 0, ""},
+		{"Gray.Stride", Field, 0, ""},
+		{"Gray16", Type, 0, ""},
+		{"Gray16.Pix", Field, 0, ""},
+		{"Gray16.Rect", Field, 0, ""},
+		{"Gray16.Stride", Field, 0, ""},
+		{"Image", Type, 0, ""},
+		{"NRGBA", Type, 0, ""},
+		{"NRGBA.Pix", Field, 0, ""},
+		{"NRGBA.Rect", Field, 0, ""},
+		{"NRGBA.Stride", Field, 0, ""},
+		{"NRGBA64", Type, 0, ""},
+		{"NRGBA64.Pix", Field, 0, ""},
+		{"NRGBA64.Rect", Field, 0, ""},
+		{"NRGBA64.Stride", Field, 0, ""},
+		{"NYCbCrA", Type, 6, ""},
+		{"NYCbCrA.A", Field, 6, ""},
+		{"NYCbCrA.AStride", Field, 6, ""},
+		{"NYCbCrA.YCbCr", Field, 6, ""},
+		{"NewAlpha", Func, 0, "func(r Rectangle) *Alpha"},
+		{"NewAlpha16", Func, 0, "func(r Rectangle) *Alpha16"},
+		{"NewCMYK", Func, 5, "func(r Rectangle) *CMYK"},
+		{"NewGray", Func, 0, "func(r Rectangle) *Gray"},
+		{"NewGray16", Func, 0, "func(r Rectangle) *Gray16"},
+		{"NewNRGBA", Func, 0, "func(r Rectangle) *NRGBA"},
+		{"NewNRGBA64", Func, 0, "func(r Rectangle) *NRGBA64"},
+		{"NewNYCbCrA", Func, 6, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA"},
+		{"NewPaletted", Func, 0, "func(r Rectangle, p color.Palette) *Paletted"},
+		{"NewRGBA", Func, 0, "func(r Rectangle) *RGBA"},
+		{"NewRGBA64", Func, 0, "func(r Rectangle) *RGBA64"},
+		{"NewUniform", Func, 0, "func(c color.Color) *Uniform"},
+		{"NewYCbCr", Func, 0, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr"},
+		{"Opaque", Var, 0, ""},
+		{"Paletted", Type, 0, ""},
+		{"Paletted.Palette", Field, 0, ""},
+		{"Paletted.Pix", Field, 0, ""},
+		{"Paletted.Rect", Field, 0, ""},
+		{"Paletted.Stride", Field, 0, ""},
+		{"PalettedImage", Type, 0, ""},
+		{"Point", Type, 0, ""},
+		{"Point.X", Field, 0, ""},
+		{"Point.Y", Field, 0, ""},
+		{"Pt", Func, 0, "func(X int, Y int) Point"},
+		{"RGBA", Type, 0, ""},
+		{"RGBA.Pix", Field, 0, ""},
+		{"RGBA.Rect", Field, 0, ""},
+		{"RGBA.Stride", Field, 0, ""},
+		{"RGBA64", Type, 0, ""},
+		{"RGBA64.Pix", Field, 0, ""},
+		{"RGBA64.Rect", Field, 0, ""},
+		{"RGBA64.Stride", Field, 0, ""},
+		{"RGBA64Image", Type, 17, ""},
+		{"Rect", Func, 0, "func(x0 int, y0 int, x1 int, y1 int) Rectangle"},
+		{"Rectangle", Type, 0, ""},
+		{"Rectangle.Max", Field, 0, ""},
+		{"Rectangle.Min", Field, 0, ""},
+		{"RegisterFormat", Func, 0, "func(name string, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error))"},
+		{"Transparent", Var, 0, ""},
+		{"Uniform", Type, 0, ""},
+		{"Uniform.C", Field, 0, ""},
+		{"White", Var, 0, ""},
+		{"YCbCr", Type, 0, ""},
+		{"YCbCr.CStride", Field, 0, ""},
+		{"YCbCr.Cb", Field, 0, ""},
+		{"YCbCr.Cr", Field, 0, ""},
+		{"YCbCr.Rect", Field, 0, ""},
+		{"YCbCr.SubsampleRatio", Field, 0, ""},
+		{"YCbCr.Y", Field, 0, ""},
+		{"YCbCr.YStride", Field, 0, ""},
+		{"YCbCrSubsampleRatio", Type, 0, ""},
+		{"YCbCrSubsampleRatio410", Const, 5, ""},
+		{"YCbCrSubsampleRatio411", Const, 5, ""},
+		{"YCbCrSubsampleRatio420", Const, 0, ""},
+		{"YCbCrSubsampleRatio422", Const, 0, ""},
+		{"YCbCrSubsampleRatio440", Const, 1, ""},
+		{"YCbCrSubsampleRatio444", Const, 0, ""},
+		{"ZP", Var, 0, ""},
+		{"ZR", Var, 0, ""},
+	},
+	"image/color": {
+		{"(Alpha).RGBA", Method, 0, ""},
+		{"(Alpha16).RGBA", Method, 0, ""},
+		{"(CMYK).RGBA", Method, 5, ""},
+		{"(Gray).RGBA", Method, 0, ""},
+		{"(Gray16).RGBA", Method, 0, ""},
+		{"(NRGBA).RGBA", Method, 0, ""},
+		{"(NRGBA64).RGBA", Method, 0, ""},
+		{"(NYCbCrA).RGBA", Method, 6, ""},
+		{"(Palette).Convert", Method, 0, ""},
+		{"(Palette).Index", Method, 0, ""},
+		{"(RGBA).RGBA", Method, 0, ""},
+		{"(RGBA64).RGBA", Method, 0, ""},
+		{"(YCbCr).RGBA", Method, 0, ""},
+		{"Alpha", Type, 0, ""},
+		{"Alpha.A", Field, 0, ""},
+		{"Alpha16", Type, 0, ""},
+		{"Alpha16.A", Field, 0, ""},
+		{"Alpha16Model", Var, 0, ""},
+		{"AlphaModel", Var, 0, ""},
+		{"Black", Var, 0, ""},
+		{"CMYK", Type, 5, ""},
+		{"CMYK.C", Field, 5, ""},
+		{"CMYK.K", Field, 5, ""},
+		{"CMYK.M", Field, 5, ""},
+		{"CMYK.Y", Field, 5, ""},
+		{"CMYKModel", Var, 5, ""},
+		{"CMYKToRGB", Func, 5, "func(c uint8, m uint8, y uint8, k uint8) (uint8, uint8, uint8)"},
+		{"Color", Type, 0, ""},
+		{"Gray", Type, 0, ""},
+		{"Gray.Y", Field, 0, ""},
+		{"Gray16", Type, 0, ""},
+		{"Gray16.Y", Field, 0, ""},
+		{"Gray16Model", Var, 0, ""},
+		{"GrayModel", Var, 0, ""},
+		{"Model", Type, 0, ""},
+		{"ModelFunc", Func, 0, "func(f func(Color) Color) Model"},
+		{"NRGBA", Type, 0, ""},
+		{"NRGBA.A", Field, 0, ""},
+		{"NRGBA.B", Field, 0, ""},
+		{"NRGBA.G", Field, 0, ""},
+		{"NRGBA.R", Field, 0, ""},
+		{"NRGBA64", Type, 0, ""},
+		{"NRGBA64.A", Field, 0, ""},
+		{"NRGBA64.B", Field, 0, ""},
+		{"NRGBA64.G", Field, 0, ""},
+		{"NRGBA64.R", Field, 0, ""},
+		{"NRGBA64Model", Var, 0, ""},
+		{"NRGBAModel", Var, 0, ""},
+		{"NYCbCrA", Type, 6, ""},
+		{"NYCbCrA.A", Field, 6, ""},
+		{"NYCbCrA.YCbCr", Field, 6, ""},
+		{"NYCbCrAModel", Var, 6, ""},
+		{"Opaque", Var, 0, ""},
+		{"Palette", Type, 0, ""},
+		{"RGBA", Type, 0, ""},
+		{"RGBA.A", Field, 0, ""},
+		{"RGBA.B", Field, 0, ""},
+		{"RGBA.G", Field, 0, ""},
+		{"RGBA.R", Field, 0, ""},
+		{"RGBA64", Type, 0, ""},
+		{"RGBA64.A", Field, 0, ""},
+		{"RGBA64.B", Field, 0, ""},
+		{"RGBA64.G", Field, 0, ""},
+		{"RGBA64.R", Field, 0, ""},
+		{"RGBA64Model", Var, 0, ""},
+		{"RGBAModel", Var, 0, ""},
+		{"RGBToCMYK", Func, 5, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8, uint8)"},
+		{"RGBToYCbCr", Func, 0, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8)"},
+		{"Transparent", Var, 0, ""},
+		{"White", Var, 0, ""},
+		{"YCbCr", Type, 0, ""},
+		{"YCbCr.Cb", Field, 0, ""},
+		{"YCbCr.Cr", Field, 0, ""},
+		{"YCbCr.Y", Field, 0, ""},
+		{"YCbCrModel", Var, 0, ""},
+		{"YCbCrToRGB", Func, 0, "func(y uint8, cb uint8, cr uint8) (uint8, uint8, uint8)"},
+	},
+	"image/color/palette": {
+		{"Plan9", Var, 2, ""},
+		{"WebSafe", Var, 2, ""},
+	},
+	"image/draw": {
+		{"(Op).Draw", Method, 2, ""},
+		{"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"},
+		{"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"},
+		{"Drawer", Type, 2, ""},
+		{"FloydSteinberg", Var, 2, ""},
+		{"Image", Type, 0, ""},
+		{"Op", Type, 0, ""},
+		{"Over", Const, 0, ""},
+		{"Quantizer", Type, 2, ""},
+		{"RGBA64Image", Type, 17, ""},
+		{"Src", Const, 0, ""},
+	},
+	"image/gif": {
+		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+		{"DecodeAll", Func, 0, "func(r io.Reader) (*GIF, error)"},
+		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+		{"DisposalBackground", Const, 5, ""},
+		{"DisposalNone", Const, 5, ""},
+		{"DisposalPrevious", Const, 5, ""},
+		{"Encode", Func, 2, "func(w io.Writer, m image.Image, o *Options) error"},
+		{"EncodeAll", Func, 2, "func(w io.Writer, g *GIF) error"},
+		{"GIF", Type, 0, ""},
+		{"GIF.BackgroundIndex", Field, 5, ""},
+		{"GIF.Config", Field, 5, ""},
+		{"GIF.Delay", Field, 0, ""},
+		{"GIF.Disposal", Field, 5, ""},
+		{"GIF.Image", Field, 0, ""},
+		{"GIF.LoopCount", Field, 0, ""},
+		{"Options", Type, 2, ""},
+		{"Options.Drawer", Field, 2, ""},
+		{"Options.NumColors", Field, 2, ""},
+		{"Options.Quantizer", Field, 2, ""},
+	},
+	"image/jpeg": {
+		{"(FormatError).Error", Method, 0, ""},
+		{"(UnsupportedError).Error", Method, 0, ""},
+		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+		{"DefaultQuality", Const, 0, ""},
+		{"Encode", Func, 0, "func(w io.Writer, m image.Image, o *Options) error"},
+		{"FormatError", Type, 0, ""},
+		{"Options", Type, 0, ""},
+		{"Options.Quality", Field, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"UnsupportedError", Type, 0, ""},
+	},
+	"image/png": {
+		{"(*Encoder).Encode", Method, 4, ""},
+		{"(FormatError).Error", Method, 0, ""},
+		{"(UnsupportedError).Error", Method, 0, ""},
+		{"BestCompression", Const, 4, ""},
+		{"BestSpeed", Const, 4, ""},
+		{"CompressionLevel", Type, 4, ""},
+		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+		{"DefaultCompression", Const, 4, ""},
+		{"Encode", Func, 0, "func(w io.Writer, m image.Image) error"},
+		{"Encoder", Type, 4, ""},
+		{"Encoder.BufferPool", Field, 9, ""},
+		{"Encoder.CompressionLevel", Field, 4, ""},
+		{"EncoderBuffer", Type, 9, ""},
+		{"EncoderBufferPool", Type, 9, ""},
+		{"FormatError", Type, 0, ""},
+		{"NoCompression", Const, 4, ""},
+		{"UnsupportedError", Type, 0, ""},
+	},
+	"index/suffixarray": {
+		{"(*Index).Bytes", Method, 0, ""},
+		{"(*Index).FindAllIndex", Method, 0, ""},
+		{"(*Index).Lookup", Method, 0, ""},
+		{"(*Index).Read", Method, 0, ""},
+		{"(*Index).Write", Method, 0, ""},
+		{"Index", Type, 0, ""},
+		{"New", Func, 0, "func(data []byte) *Index"},
+	},
+	"io": {
+		{"(*LimitedReader).Read", Method, 0, ""},
+		{"(*OffsetWriter).Seek", Method, 20, ""},
+		{"(*OffsetWriter).Write", Method, 20, ""},
+		{"(*OffsetWriter).WriteAt", Method, 20, ""},
+		{"(*PipeReader).Close", Method, 0, ""},
+		{"(*PipeReader).CloseWithError", Method, 0, ""},
+		{"(*PipeReader).Read", Method, 0, ""},
+		{"(*PipeWriter).Close", Method, 0, ""},
+		{"(*PipeWriter).CloseWithError", Method, 0, ""},
+		{"(*PipeWriter).Write", Method, 0, ""},
+		{"(*SectionReader).Outer", Method, 22, ""},
+		{"(*SectionReader).Read", Method, 0, ""},
+		{"(*SectionReader).ReadAt", Method, 0, ""},
+		{"(*SectionReader).Seek", Method, 0, ""},
+		{"(*SectionReader).Size", Method, 0, ""},
+		{"ByteReader", Type, 0, ""},
+		{"ByteScanner", Type, 0, ""},
+		{"ByteWriter", Type, 1, ""},
+		{"Closer", Type, 0, ""},
+		{"Copy", Func, 0, "func(dst Writer, src Reader) (written int64, err error)"},
+		{"CopyBuffer", Func, 5, "func(dst Writer, src Reader, buf []byte) (written int64, err error)"},
+		{"CopyN", Func, 0, "func(dst Writer, src Reader, n int64) (written int64, err error)"},
+		{"Discard", Var, 16, ""},
+		{"EOF", Var, 0, ""},
+		{"ErrClosedPipe", Var, 0, ""},
+		{"ErrNoProgress", Var, 1, ""},
+		{"ErrShortBuffer", Var, 0, ""},
+		{"ErrShortWrite", Var, 0, ""},
+		{"ErrUnexpectedEOF", Var, 0, ""},
+		{"LimitReader", Func, 0, "func(r Reader, n int64) Reader"},
+		{"LimitedReader", Type, 0, ""},
+		{"LimitedReader.N", Field, 0, ""},
+		{"LimitedReader.R", Field, 0, ""},
+		{"MultiReader", Func, 0, "func(readers ...Reader) Reader"},
+		{"MultiWriter", Func, 0, "func(writers ...Writer) Writer"},
+		{"NewOffsetWriter", Func, 20, "func(w WriterAt, off int64) *OffsetWriter"},
+		{"NewSectionReader", Func, 0, "func(r ReaderAt, off int64, n int64) *SectionReader"},
+		{"NopCloser", Func, 16, "func(r Reader) ReadCloser"},
+		{"OffsetWriter", Type, 20, ""},
+		{"Pipe", Func, 0, "func() (*PipeReader, *PipeWriter)"},
+		{"PipeReader", Type, 0, ""},
+		{"PipeWriter", Type, 0, ""},
+		{"ReadAll", Func, 16, "func(r Reader) ([]byte, error)"},
+		{"ReadAtLeast", Func, 0, "func(r Reader, buf []byte, min int) (n int, err error)"},
+		{"ReadCloser", Type, 0, ""},
+		{"ReadFull", Func, 0, "func(r Reader, buf []byte) (n int, err error)"},
+		{"ReadSeekCloser", Type, 16, ""},
+		{"ReadSeeker", Type, 0, ""},
+		{"ReadWriteCloser", Type, 0, ""},
+		{"ReadWriteSeeker", Type, 0, ""},
+		{"ReadWriter", Type, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"ReaderAt", Type, 0, ""},
+		{"ReaderFrom", Type, 0, ""},
+		{"RuneReader", Type, 0, ""},
+		{"RuneScanner", Type, 0, ""},
+		{"SectionReader", Type, 0, ""},
+		{"SeekCurrent", Const, 7, ""},
+		{"SeekEnd", Const, 7, ""},
+		{"SeekStart", Const, 7, ""},
+		{"Seeker", Type, 0, ""},
+		{"StringWriter", Type, 12, ""},
+		{"TeeReader", Func, 0, "func(r Reader, w Writer) Reader"},
+		{"WriteCloser", Type, 0, ""},
+		{"WriteSeeker", Type, 0, ""},
+		{"WriteString", Func, 0, "func(w Writer, s string) (n int, err error)"},
+		{"Writer", Type, 0, ""},
+		{"WriterAt", Type, 0, ""},
+		{"WriterTo", Type, 0, ""},
+	},
+	"io/fs": {
+		{"(*PathError).Error", Method, 16, ""},
+		{"(*PathError).Timeout", Method, 16, ""},
+		{"(*PathError).Unwrap", Method, 16, ""},
+		{"(FileMode).IsDir", Method, 16, ""},
+		{"(FileMode).IsRegular", Method, 16, ""},
+		{"(FileMode).Perm", Method, 16, ""},
+		{"(FileMode).String", Method, 16, ""},
+		{"(FileMode).Type", Method, 16, ""},
+		{"DirEntry", Type, 16, ""},
+		{"ErrClosed", Var, 16, ""},
+		{"ErrExist", Var, 16, ""},
+		{"ErrInvalid", Var, 16, ""},
+		{"ErrNotExist", Var, 16, ""},
+		{"ErrPermission", Var, 16, ""},
+		{"FS", Type, 16, ""},
+		{"File", Type, 16, ""},
+		{"FileInfo", Type, 16, ""},
+		{"FileInfoToDirEntry", Func, 17, "func(info FileInfo) DirEntry"},
+		{"FileMode", Type, 16, ""},
+		{"FormatDirEntry", Func, 21, "func(dir DirEntry) string"},
+		{"FormatFileInfo", Func, 21, "func(info FileInfo) string"},
+		{"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"},
+		{"GlobFS", Type, 16, ""},
+		{"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"},
+		{"ModeAppend", Const, 16, ""},
+		{"ModeCharDevice", Const, 16, ""},
+		{"ModeDevice", Const, 16, ""},
+		{"ModeDir", Const, 16, ""},
+		{"ModeExclusive", Const, 16, ""},
+		{"ModeIrregular", Const, 16, ""},
+		{"ModeNamedPipe", Const, 16, ""},
+		{"ModePerm", Const, 16, ""},
+		{"ModeSetgid", Const, 16, ""},
+		{"ModeSetuid", Const, 16, ""},
+		{"ModeSocket", Const, 16, ""},
+		{"ModeSticky", Const, 16, ""},
+		{"ModeSymlink", Const, 16, ""},
+		{"ModeTemporary", Const, 16, ""},
+		{"ModeType", Const, 16, ""},
+		{"PathError", Type, 16, ""},
+		{"PathError.Err", Field, 16, ""},
+		{"PathError.Op", Field, 16, ""},
+		{"PathError.Path", Field, 16, ""},
+		{"ReadDir", Func, 16, "func(fsys FS, name string) ([]DirEntry, error)"},
+		{"ReadDirFS", Type, 16, ""},
+		{"ReadDirFile", Type, 16, ""},
+		{"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"},
+		{"ReadFileFS", Type, 16, ""},
+		{"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"},
+		{"ReadLinkFS", Type, 25, ""},
+		{"SkipAll", Var, 20, ""},
+		{"SkipDir", Var, 16, ""},
+		{"Stat", Func, 16, "func(fsys FS, name string) (FileInfo, error)"},
+		{"StatFS", Type, 16, ""},
+		{"Sub", Func, 16, "func(fsys FS, dir string) (FS, error)"},
+		{"SubFS", Type, 16, ""},
+		{"ValidPath", Func, 16, "func(name string) bool"},
+		{"WalkDir", Func, 16, "func(fsys FS, root string, fn WalkDirFunc) error"},
+		{"WalkDirFunc", Type, 16, ""},
+	},
+	"io/ioutil": {
+		{"Discard", Var, 0, ""},
+		{"NopCloser", Func, 0, "func(r io.Reader) io.ReadCloser"},
+		{"ReadAll", Func, 0, "func(r io.Reader) ([]byte, error)"},
+		{"ReadDir", Func, 0, "func(dirname string) ([]fs.FileInfo, error)"},
+		{"ReadFile", Func, 0, "func(filename string) ([]byte, error)"},
+		{"TempDir", Func, 0, "func(dir string, pattern string) (name string, err error)"},
+		{"TempFile", Func, 0, "func(dir string, pattern string) (f *os.File, err error)"},
+		{"WriteFile", Func, 0, "func(filename string, data []byte, perm fs.FileMode) error"},
+	},
+	"iter": {
+		{"Pull", Func, 23, "func[V any](seq Seq[V]) (next func() (V, bool), stop func())"},
+		{"Pull2", Func, 23, "func[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func())"},
+		{"Seq", Type, 23, ""},
+		{"Seq2", Type, 23, ""},
+	},
+	"log": {
+		{"(*Logger).Fatal", Method, 0, ""},
+		{"(*Logger).Fatalf", Method, 0, ""},
+		{"(*Logger).Fatalln", Method, 0, ""},
+		{"(*Logger).Flags", Method, 0, ""},
+		{"(*Logger).Output", Method, 0, ""},
+		{"(*Logger).Panic", Method, 0, ""},
+		{"(*Logger).Panicf", Method, 0, ""},
+		{"(*Logger).Panicln", Method, 0, ""},
+		{"(*Logger).Prefix", Method, 0, ""},
+		{"(*Logger).Print", Method, 0, ""},
+		{"(*Logger).Printf", Method, 0, ""},
+		{"(*Logger).Println", Method, 0, ""},
+		{"(*Logger).SetFlags", Method, 0, ""},
+		{"(*Logger).SetOutput", Method, 5, ""},
+		{"(*Logger).SetPrefix", Method, 0, ""},
+		{"(*Logger).Writer", Method, 12, ""},
+		{"Default", Func, 16, "func() *Logger"},
+		{"Fatal", Func, 0, "func(v ...any)"},
+		{"Fatalf", Func, 0, "func(format string, v ...any)"},
+		{"Fatalln", Func, 0, "func(v ...any)"},
+		{"Flags", Func, 0, "func() int"},
+		{"LUTC", Const, 5, ""},
+		{"Ldate", Const, 0, ""},
+		{"Llongfile", Const, 0, ""},
+		{"Lmicroseconds", Const, 0, ""},
+		{"Lmsgprefix", Const, 14, ""},
+		{"Logger", Type, 0, ""},
+		{"Lshortfile", Const, 0, ""},
+		{"LstdFlags", Const, 0, ""},
+		{"Ltime", Const, 0, ""},
+		{"New", Func, 0, "func(out io.Writer, prefix string, flag int) *Logger"},
+		{"Output", Func, 5, "func(calldepth int, s string) error"},
+		{"Panic", Func, 0, "func(v ...any)"},
+		{"Panicf", Func, 0, "func(format string, v ...any)"},
+		{"Panicln", Func, 0, "func(v ...any)"},
+		{"Prefix", Func, 0, "func() string"},
+		{"Print", Func, 0, "func(v ...any)"},
+		{"Printf", Func, 0, "func(format string, v ...any)"},
+		{"Println", Func, 0, "func(v ...any)"},
+		{"SetFlags", Func, 0, "func(flag int)"},
+		{"SetOutput", Func, 0, "func(w io.Writer)"},
+		{"SetPrefix", Func, 0, "func(prefix string)"},
+		{"Writer", Func, 13, "func() io.Writer"},
+	},
+	"log/slog": {
+		{"(*JSONHandler).Enabled", Method, 21, ""},
+		{"(*JSONHandler).Handle", Method, 21, ""},
+		{"(*JSONHandler).WithAttrs", Method, 21, ""},
+		{"(*JSONHandler).WithGroup", Method, 21, ""},
+		{"(*Level).UnmarshalJSON", Method, 21, ""},
+		{"(*Level).UnmarshalText", Method, 21, ""},
+		{"(*LevelVar).AppendText", Method, 24, ""},
+		{"(*LevelVar).Level", Method, 21, ""},
+		{"(*LevelVar).MarshalText", Method, 21, ""},
+		{"(*LevelVar).Set", Method, 21, ""},
+		{"(*LevelVar).String", Method, 21, ""},
+		{"(*LevelVar).UnmarshalText", Method, 21, ""},
+		{"(*Logger).Debug", Method, 21, ""},
+		{"(*Logger).DebugContext", Method, 21, ""},
+		{"(*Logger).Enabled", Method, 21, ""},
+		{"(*Logger).Error", Method, 21, ""},
+		{"(*Logger).ErrorContext", Method, 21, ""},
+		{"(*Logger).Handler", Method, 21, ""},
+		{"(*Logger).Info", Method, 21, ""},
+		{"(*Logger).InfoContext", Method, 21, ""},
+		{"(*Logger).Log", Method, 21, ""},
+		{"(*Logger).LogAttrs", Method, 21, ""},
+		{"(*Logger).Warn", Method, 21, ""},
+		{"(*Logger).WarnContext", Method, 21, ""},
+		{"(*Logger).With", Method, 21, ""},
+		{"(*Logger).WithGroup", Method, 21, ""},
+		{"(*MultiHandler).Enabled", Method, 26, ""},
+		{"(*MultiHandler).Handle", Method, 26, ""},
+		{"(*MultiHandler).WithAttrs", Method, 26, ""},
+		{"(*MultiHandler).WithGroup", Method, 26, ""},
+		{"(*Record).Add", Method, 21, ""},
+		{"(*Record).AddAttrs", Method, 21, ""},
+		{"(*TextHandler).Enabled", Method, 21, ""},
+		{"(*TextHandler).Handle", Method, 21, ""},
+		{"(*TextHandler).WithAttrs", Method, 21, ""},
+		{"(*TextHandler).WithGroup", Method, 21, ""},
+		{"(Attr).Equal", Method, 21, ""},
+		{"(Attr).String", Method, 21, ""},
+		{"(Kind).String", Method, 21, ""},
+		{"(Level).AppendText", Method, 24, ""},
+		{"(Level).Level", Method, 21, ""},
+		{"(Level).MarshalJSON", Method, 21, ""},
+		{"(Level).MarshalText", Method, 21, ""},
+		{"(Level).String", Method, 21, ""},
+		{"(Record).Attrs", Method, 21, ""},
+		{"(Record).Clone", Method, 21, ""},
+		{"(Record).NumAttrs", Method, 21, ""},
+		{"(Record).Source", Method, 25, ""},
+		{"(Value).Any", Method, 21, ""},
+		{"(Value).Bool", Method, 21, ""},
+		{"(Value).Duration", Method, 21, ""},
+		{"(Value).Equal", Method, 21, ""},
+		{"(Value).Float64", Method, 21, ""},
+		{"(Value).Group", Method, 21, ""},
+		{"(Value).Int64", Method, 21, ""},
+		{"(Value).Kind", Method, 21, ""},
+		{"(Value).LogValuer", Method, 21, ""},
+		{"(Value).Resolve", Method, 21, ""},
+		{"(Value).String", Method, 21, ""},
+		{"(Value).Time", Method, 21, ""},
+		{"(Value).Uint64", Method, 21, ""},
+		{"Any", Func, 21, "func(key string, value any) Attr"},
+		{"AnyValue", Func, 21, "func(v any) Value"},
+		{"Attr", Type, 21, ""},
+		{"Attr.Key", Field, 21, ""},
+		{"Attr.Value", Field, 21, ""},
+		{"Bool", Func, 21, "func(key string, v bool) Attr"},
+		{"BoolValue", Func, 21, "func(v bool) Value"},
+		{"Debug", Func, 21, "func(msg string, args ...any)"},
+		{"DebugContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+		{"Default", Func, 21, "func() *Logger"},
+		{"DiscardHandler", Var, 24, ""},
+		{"Duration", Func, 21, "func(key string, v time.Duration) Attr"},
+		{"DurationValue", Func, 21, "func(v time.Duration) Value"},
+		{"Error", Func, 21, "func(msg string, args ...any)"},
+		{"ErrorContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+		{"Float64", Func, 21, "func(key string, v float64) Attr"},
+		{"Float64Value", Func, 21, "func(v float64) Value"},
+		{"Group", Func, 21, "func(key string, args ...any) Attr"},
+		{"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"},
+		{"GroupValue", Func, 21, "func(as ...Attr) Value"},
+		{"Handler", Type, 21, ""},
+		{"HandlerOptions", Type, 21, ""},
+		{"HandlerOptions.AddSource", Field, 21, ""},
+		{"HandlerOptions.Level", Field, 21, ""},
+		{"HandlerOptions.ReplaceAttr", Field, 21, ""},
+		{"Info", Func, 21, "func(msg string, args ...any)"},
+		{"InfoContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+		{"Int", Func, 21, "func(key string, value int) Attr"},
+		{"Int64", Func, 21, "func(key string, value int64) Attr"},
+		{"Int64Value", Func, 21, "func(v int64) Value"},
+		{"IntValue", Func, 21, "func(v int) Value"},
+		{"JSONHandler", Type, 21, ""},
+		{"Kind", Type, 21, ""},
+		{"KindAny", Const, 21, ""},
+		{"KindBool", Const, 21, ""},
+		{"KindDuration", Const, 21, ""},
+		{"KindFloat64", Const, 21, ""},
+		{"KindGroup", Const, 21, ""},
+		{"KindInt64", Const, 21, ""},
+		{"KindLogValuer", Const, 21, ""},
+		{"KindString", Const, 21, ""},
+		{"KindTime", Const, 21, ""},
+		{"KindUint64", Const, 21, ""},
+		{"Level", Type, 21, ""},
+		{"LevelDebug", Const, 21, ""},
+		{"LevelError", Const, 21, ""},
+		{"LevelInfo", Const, 21, ""},
+		{"LevelKey", Const, 21, ""},
+		{"LevelVar", Type, 21, ""},
+		{"LevelWarn", Const, 21, ""},
+		{"Leveler", Type, 21, ""},
+		{"Log", Func, 21, "func(ctx context.Context, level Level, msg string, args ...any)"},
+		{"LogAttrs", Func, 21, "func(ctx context.Context, level Level, msg string, attrs ...Attr)"},
+		{"LogValuer", Type, 21, ""},
+		{"Logger", Type, 21, ""},
+		{"MessageKey", Const, 21, ""},
+		{"MultiHandler", Type, 26, ""},
+		{"New", Func, 21, "func(h Handler) *Logger"},
+		{"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"},
+		{"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"},
+		{"NewMultiHandler", Func, 26, "func(handlers ...Handler) *MultiHandler"},
+		{"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"},
+		{"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"},
+		{"Record", Type, 21, ""},
+		{"Record.Level", Field, 21, ""},
+		{"Record.Message", Field, 21, ""},
+		{"Record.PC", Field, 21, ""},
+		{"Record.Time", Field, 21, ""},
+		{"SetDefault", Func, 21, "func(l *Logger)"},
+		{"SetLogLoggerLevel", Func, 22, "func(level Level) (oldLevel Level)"},
+		{"Source", Type, 21, ""},
+		{"Source.File", Field, 21, ""},
+		{"Source.Function", Field, 21, ""},
+		{"Source.Line", Field, 21, ""},
+		{"SourceKey", Const, 21, ""},
+		{"String", Func, 21, "func(key string, value string) Attr"},
+		{"StringValue", Func, 21, "func(value string) Value"},
+		{"TextHandler", Type, 21, ""},
+		{"Time", Func, 21, "func(key string, v time.Time) Attr"},
+		{"TimeKey", Const, 21, ""},
+		{"TimeValue", Func, 21, "func(v time.Time) Value"},
+		{"Uint64", Func, 21, "func(key string, v uint64) Attr"},
+		{"Uint64Value", Func, 21, "func(v uint64) Value"},
+		{"Value", Type, 21, ""},
+		{"Warn", Func, 21, "func(msg string, args ...any)"},
+		{"WarnContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+		{"With", Func, 21, "func(args ...any) *Logger"},
+	},
+	"log/syslog": {
+		{"(*Writer).Alert", Method, 0, ""},
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).Crit", Method, 0, ""},
+		{"(*Writer).Debug", Method, 0, ""},
+		{"(*Writer).Emerg", Method, 0, ""},
+		{"(*Writer).Err", Method, 0, ""},
+		{"(*Writer).Info", Method, 0, ""},
+		{"(*Writer).Notice", Method, 0, ""},
+		{"(*Writer).Warning", Method, 0, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"Dial", Func, 0, "func(network string, raddr string, priority Priority, tag string) (*Writer, error)"},
+		{"LOG_ALERT", Const, 0, ""},
+		{"LOG_AUTH", Const, 1, ""},
+		{"LOG_AUTHPRIV", Const, 1, ""},
+		{"LOG_CRIT", Const, 0, ""},
+		{"LOG_CRON", Const, 1, ""},
+		{"LOG_DAEMON", Const, 1, ""},
+		{"LOG_DEBUG", Const, 0, ""},
+		{"LOG_EMERG", Const, 0, ""},
+		{"LOG_ERR", Const, 0, ""},
+		{"LOG_FTP", Const, 1, ""},
+		{"LOG_INFO", Const, 0, ""},
+		{"LOG_KERN", Const, 1, ""},
+		{"LOG_LOCAL0", Const, 1, ""},
+		{"LOG_LOCAL1", Const, 1, ""},
+		{"LOG_LOCAL2", Const, 1, ""},
+		{"LOG_LOCAL3", Const, 1, ""},
+		{"LOG_LOCAL4", Const, 1, ""},
+		{"LOG_LOCAL5", Const, 1, ""},
+		{"LOG_LOCAL6", Const, 1, ""},
+		{"LOG_LOCAL7", Const, 1, ""},
+		{"LOG_LPR", Const, 1, ""},
+		{"LOG_MAIL", Const, 1, ""},
+		{"LOG_NEWS", Const, 1, ""},
+		{"LOG_NOTICE", Const, 0, ""},
+		{"LOG_SYSLOG", Const, 1, ""},
+		{"LOG_USER", Const, 1, ""},
+		{"LOG_UUCP", Const, 1, ""},
+		{"LOG_WARNING", Const, 0, ""},
+		{"New", Func, 0, "func(priority Priority, tag string) (*Writer, error)"},
+		{"NewLogger", Func, 0, "func(p Priority, logFlag int) (*log.Logger, error)"},
+		{"Priority", Type, 0, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"maps": {
+		{"All", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq2[K, V]"},
+		{"Clone", Func, 21, "func[M ~map[K]V, K comparable, V any](m M) M"},
+		{"Collect", Func, 23, "func[K comparable, V any](seq iter.Seq2[K, V]) map[K]V"},
+		{"Copy", Func, 21, "func[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2)"},
+		{"DeleteFunc", Func, 21, "func[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool)"},
+		{"Equal", Func, 21, "func[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool"},
+		{"EqualFunc", Func, 21, "func[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool"},
+		{"Insert", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map, seq iter.Seq2[K, V])"},
+		{"Keys", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K]"},
+		{"Values", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[V]"},
+	},
+	"math": {
+		{"Abs", Func, 0, "func(x float64) float64"},
+		{"Acos", Func, 0, "func(x float64) float64"},
+		{"Acosh", Func, 0, "func(x float64) float64"},
+		{"Asin", Func, 0, "func(x float64) float64"},
+		{"Asinh", Func, 0, "func(x float64) float64"},
+		{"Atan", Func, 0, "func(x float64) float64"},
+		{"Atan2", Func, 0, "func(y float64, x float64) float64"},
+		{"Atanh", Func, 0, "func(x float64) float64"},
+		{"Cbrt", Func, 0, "func(x float64) float64"},
+		{"Ceil", Func, 0, "func(x float64) float64"},
+		{"Copysign", Func, 0, "func(f float64, sign float64) float64"},
+		{"Cos", Func, 0, "func(x float64) float64"},
+		{"Cosh", Func, 0, "func(x float64) float64"},
+		{"Dim", Func, 0, "func(x float64, y float64) float64"},
+		{"E", Const, 0, ""},
+		{"Erf", Func, 0, "func(x float64) float64"},
+		{"Erfc", Func, 0, "func(x float64) float64"},
+		{"Erfcinv", Func, 10, "func(x float64) float64"},
+		{"Erfinv", Func, 10, "func(x float64) float64"},
+		{"Exp", Func, 0, "func(x float64) float64"},
+		{"Exp2", Func, 0, "func(x float64) float64"},
+		{"Expm1", Func, 0, "func(x float64) float64"},
+		{"FMA", Func, 14, "func(x float64, y float64, z float64) float64"},
+		{"Float32bits", Func, 0, "func(f float32) uint32"},
+		{"Float32frombits", Func, 0, "func(b uint32) float32"},
+		{"Float64bits", Func, 0, "func(f float64) uint64"},
+		{"Float64frombits", Func, 0, "func(b uint64) float64"},
+		{"Floor", Func, 0, "func(x float64) float64"},
+		{"Frexp", Func, 0, "func(f float64) (frac float64, exp int)"},
+		{"Gamma", Func, 0, "func(x float64) float64"},
+		{"Hypot", Func, 0, "func(p float64, q float64) float64"},
+		{"Ilogb", Func, 0, "func(x float64) int"},
+		{"Inf", Func, 0, "func(sign int) float64"},
+		{"IsInf", Func, 0, "func(f float64, sign int) bool"},
+		{"IsNaN", Func, 0, "func(f float64) (is bool)"},
+		{"J0", Func, 0, "func(x float64) float64"},
+		{"J1", Func, 0, "func(x float64) float64"},
+		{"Jn", Func, 0, "func(n int, x float64) float64"},
+		{"Ldexp", Func, 0, "func(frac float64, exp int) float64"},
+		{"Lgamma", Func, 0, "func(x float64) (lgamma float64, sign int)"},
+		{"Ln10", Const, 0, ""},
+		{"Ln2", Const, 0, ""},
+		{"Log", Func, 0, "func(x float64) float64"},
+		{"Log10", Func, 0, "func(x float64) float64"},
+		{"Log10E", Const, 0, ""},
+		{"Log1p", Func, 0, "func(x float64) float64"},
+		{"Log2", Func, 0, "func(x float64) float64"},
+		{"Log2E", Const, 0, ""},
+		{"Logb", Func, 0, "func(x float64) float64"},
+		{"Max", Func, 0, "func(x float64, y float64) float64"},
+		{"MaxFloat32", Const, 0, ""},
+		{"MaxFloat64", Const, 0, ""},
+		{"MaxInt", Const, 17, ""},
+		{"MaxInt16", Const, 0, ""},
+		{"MaxInt32", Const, 0, ""},
+		{"MaxInt64", Const, 0, ""},
+		{"MaxInt8", Const, 0, ""},
+		{"MaxUint", Const, 17, ""},
+		{"MaxUint16", Const, 0, ""},
+		{"MaxUint32", Const, 0, ""},
+		{"MaxUint64", Const, 0, ""},
+		{"MaxUint8", Const, 0, ""},
+		{"Min", Func, 0, "func(x float64, y float64) float64"},
+		{"MinInt", Const, 17, ""},
+		{"MinInt16", Const, 0, ""},
+		{"MinInt32", Const, 0, ""},
+		{"MinInt64", Const, 0, ""},
+		{"MinInt8", Const, 0, ""},
+		{"Mod", Func, 0, "func(x float64, y float64) float64"},
+		{"Modf", Func, 0, "func(f float64) (integer float64, fractional float64)"},
+		{"NaN", Func, 0, "func() float64"},
+		{"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"},
+		{"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"},
+		{"Phi", Const, 0, ""},
+		{"Pi", Const, 0, ""},
+		{"Pow", Func, 0, "func(x float64, y float64) float64"},
+		{"Pow10", Func, 0, "func(n int) float64"},
+		{"Remainder", Func, 0, "func(x float64, y float64) float64"},
+		{"Round", Func, 10, "func(x float64) float64"},
+		{"RoundToEven", Func, 10, "func(x float64) float64"},
+		{"Signbit", Func, 0, "func(x float64) bool"},
+		{"Sin", Func, 0, "func(x float64) float64"},
+		{"Sincos", Func, 0, "func(x float64) (sin float64, cos float64)"},
+		{"Sinh", Func, 0, "func(x float64) float64"},
+		{"SmallestNonzeroFloat32", Const, 0, ""},
+		{"SmallestNonzeroFloat64", Const, 0, ""},
+		{"Sqrt", Func, 0, "func(x float64) float64"},
+		{"Sqrt2", Const, 0, ""},
+		{"SqrtE", Const, 0, ""},
+		{"SqrtPhi", Const, 0, ""},
+		{"SqrtPi", Const, 0, ""},
+		{"Tan", Func, 0, "func(x float64) float64"},
+		{"Tanh", Func, 0, "func(x float64) float64"},
+		{"Trunc", Func, 0, "func(x float64) float64"},
+		{"Y0", Func, 0, "func(x float64) float64"},
+		{"Y1", Func, 0, "func(x float64) float64"},
+		{"Yn", Func, 0, "func(n int, x float64) float64"},
+	},
+	"math/big": {
+		{"(*Float).Abs", Method, 5, ""},
+		{"(*Float).Acc", Method, 5, ""},
+		{"(*Float).Add", Method, 5, ""},
+		{"(*Float).Append", Method, 5, ""},
+		{"(*Float).AppendText", Method, 24, ""},
+		{"(*Float).Cmp", Method, 5, ""},
+		{"(*Float).Copy", Method, 5, ""},
+		{"(*Float).Float32", Method, 5, ""},
+		{"(*Float).Float64", Method, 5, ""},
+		{"(*Float).Format", Method, 5, ""},
+		{"(*Float).GobDecode", Method, 7, ""},
+		{"(*Float).GobEncode", Method, 7, ""},
+		{"(*Float).Int", Method, 5, ""},
+		{"(*Float).Int64", Method, 5, ""},
+		{"(*Float).IsInf", Method, 5, ""},
+		{"(*Float).IsInt", Method, 5, ""},
+		{"(*Float).MantExp", Method, 5, ""},
+		{"(*Float).MarshalText", Method, 6, ""},
+		{"(*Float).MinPrec", Method, 5, ""},
+		{"(*Float).Mode", Method, 5, ""},
+		{"(*Float).Mul", Method, 5, ""},
+		{"(*Float).Neg", Method, 5, ""},
+		{"(*Float).Parse", Method, 5, ""},
+		{"(*Float).Prec", Method, 5, ""},
+		{"(*Float).Quo", Method, 5, ""},
+		{"(*Float).Rat", Method, 5, ""},
+		{"(*Float).Scan", Method, 8, ""},
+		{"(*Float).Set", Method, 5, ""},
+		{"(*Float).SetFloat64", Method, 5, ""},
+		{"(*Float).SetInf", Method, 5, ""},
+		{"(*Float).SetInt", Method, 5, ""},
+		{"(*Float).SetInt64", Method, 5, ""},
+		{"(*Float).SetMantExp", Method, 5, ""},
+		{"(*Float).SetMode", Method, 5, ""},
+		{"(*Float).SetPrec", Method, 5, ""},
+		{"(*Float).SetRat", Method, 5, ""},
+		{"(*Float).SetString", Method, 5, ""},
+		{"(*Float).SetUint64", Method, 5, ""},
+		{"(*Float).Sign", Method, 5, ""},
+		{"(*Float).Signbit", Method, 5, ""},
+		{"(*Float).Sqrt", Method, 10, ""},
+		{"(*Float).String", Method, 5, ""},
+		{"(*Float).Sub", Method, 5, ""},
+		{"(*Float).Text", Method, 5, ""},
+		{"(*Float).Uint64", Method, 5, ""},
+		{"(*Float).UnmarshalText", Method, 6, ""},
+		{"(*Int).Abs", Method, 0, ""},
+		{"(*Int).Add", Method, 0, ""},
+		{"(*Int).And", Method, 0, ""},
+		{"(*Int).AndNot", Method, 0, ""},
+		{"(*Int).Append", Method, 6, ""},
+		{"(*Int).AppendText", Method, 24, ""},
+		{"(*Int).Binomial", Method, 0, ""},
+		{"(*Int).Bit", Method, 0, ""},
+		{"(*Int).BitLen", Method, 0, ""},
+		{"(*Int).Bits", Method, 0, ""},
+		{"(*Int).Bytes", Method, 0, ""},
+		{"(*Int).Cmp", Method, 0, ""},
+		{"(*Int).CmpAbs", Method, 10, ""},
+		{"(*Int).Div", Method, 0, ""},
+		{"(*Int).DivMod", Method, 0, ""},
+		{"(*Int).Exp", Method, 0, ""},
+		{"(*Int).FillBytes", Method, 15, ""},
+		{"(*Int).Float64", Method, 21, ""},
+		{"(*Int).Format", Method, 0, ""},
+		{"(*Int).GCD", Method, 0, ""},
+		{"(*Int).GobDecode", Method, 0, ""},
+		{"(*Int).GobEncode", Method, 0, ""},
+		{"(*Int).Int64", Method, 0, ""},
+		{"(*Int).IsInt64", Method, 9, ""},
+		{"(*Int).IsUint64", Method, 9, ""},
+		{"(*Int).Lsh", Method, 0, ""},
+		{"(*Int).MarshalJSON", Method, 1, ""},
+		{"(*Int).MarshalText", Method, 3, ""},
+		{"(*Int).Mod", Method, 0, ""},
+		{"(*Int).ModInverse", Method, 0, ""},
+		{"(*Int).ModSqrt", Method, 5, ""},
+		{"(*Int).Mul", Method, 0, ""},
+		{"(*Int).MulRange", Method, 0, ""},
+		{"(*Int).Neg", Method, 0, ""},
+		{"(*Int).Not", Method, 0, ""},
+		{"(*Int).Or", Method, 0, ""},
+		{"(*Int).ProbablyPrime", Method, 0, ""},
+		{"(*Int).Quo", Method, 0, ""},
+		{"(*Int).QuoRem", Method, 0, ""},
+		{"(*Int).Rand", Method, 0, ""},
+		{"(*Int).Rem", Method, 0, ""},
+		{"(*Int).Rsh", Method, 0, ""},
+		{"(*Int).Scan", Method, 0, ""},
+		{"(*Int).Set", Method, 0, ""},
+		{"(*Int).SetBit", Method, 0, ""},
+		{"(*Int).SetBits", Method, 0, ""},
+		{"(*Int).SetBytes", Method, 0, ""},
+		{"(*Int).SetInt64", Method, 0, ""},
+		{"(*Int).SetString", Method, 0, ""},
+		{"(*Int).SetUint64", Method, 1, ""},
+		{"(*Int).Sign", Method, 0, ""},
+		{"(*Int).Sqrt", Method, 8, ""},
+		{"(*Int).String", Method, 0, ""},
+		{"(*Int).Sub", Method, 0, ""},
+		{"(*Int).Text", Method, 6, ""},
+		{"(*Int).TrailingZeroBits", Method, 13, ""},
+		{"(*Int).Uint64", Method, 1, ""},
+		{"(*Int).UnmarshalJSON", Method, 1, ""},
+		{"(*Int).UnmarshalText", Method, 3, ""},
+		{"(*Int).Xor", Method, 0, ""},
+		{"(*Rat).Abs", Method, 0, ""},
+		{"(*Rat).Add", Method, 0, ""},
+		{"(*Rat).AppendText", Method, 24, ""},
+		{"(*Rat).Cmp", Method, 0, ""},
+		{"(*Rat).Denom", Method, 0, ""},
+		{"(*Rat).Float32", Method, 4, ""},
+		{"(*Rat).Float64", Method, 1, ""},
+		{"(*Rat).FloatPrec", Method, 22, ""},
+		{"(*Rat).FloatString", Method, 0, ""},
+		{"(*Rat).GobDecode", Method, 0, ""},
+		{"(*Rat).GobEncode", Method, 0, ""},
+		{"(*Rat).Inv", Method, 0, ""},
+		{"(*Rat).IsInt", Method, 0, ""},
+		{"(*Rat).MarshalText", Method, 3, ""},
+		{"(*Rat).Mul", Method, 0, ""},
+		{"(*Rat).Neg", Method, 0, ""},
+		{"(*Rat).Num", Method, 0, ""},
+		{"(*Rat).Quo", Method, 0, ""},
+		{"(*Rat).RatString", Method, 0, ""},
+		{"(*Rat).Scan", Method, 0, ""},
+		{"(*Rat).Set", Method, 0, ""},
+		{"(*Rat).SetFloat64", Method, 1, ""},
+		{"(*Rat).SetFrac", Method, 0, ""},
+		{"(*Rat).SetFrac64", Method, 0, ""},
+		{"(*Rat).SetInt", Method, 0, ""},
+		{"(*Rat).SetInt64", Method, 0, ""},
+		{"(*Rat).SetString", Method, 0, ""},
+		{"(*Rat).SetUint64", Method, 13, ""},
+		{"(*Rat).Sign", Method, 0, ""},
+		{"(*Rat).String", Method, 0, ""},
+		{"(*Rat).Sub", Method, 0, ""},
+		{"(*Rat).UnmarshalText", Method, 3, ""},
+		{"(Accuracy).String", Method, 5, ""},
+		{"(ErrNaN).Error", Method, 5, ""},
+		{"(RoundingMode).String", Method, 5, ""},
+		{"Above", Const, 5, ""},
+		{"Accuracy", Type, 5, ""},
+		{"AwayFromZero", Const, 5, ""},
+		{"Below", Const, 5, ""},
+		{"ErrNaN", Type, 5, ""},
+		{"Exact", Const, 5, ""},
+		{"Float", Type, 5, ""},
+		{"Int", Type, 0, ""},
+		{"Jacobi", Func, 5, "func(x *Int, y *Int) int"},
+		{"MaxBase", Const, 0, ""},
+		{"MaxExp", Const, 5, ""},
+		{"MaxPrec", Const, 5, ""},
+		{"MinExp", Const, 5, ""},
+		{"NewFloat", Func, 5, "func(x float64) *Float"},
+		{"NewInt", Func, 0, "func(x int64) *Int"},
+		{"NewRat", Func, 0, "func(a int64, b int64) *Rat"},
+		{"ParseFloat", Func, 5, "func(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error)"},
+		{"Rat", Type, 0, ""},
+		{"RoundingMode", Type, 5, ""},
+		{"ToNearestAway", Const, 5, ""},
+		{"ToNearestEven", Const, 5, ""},
+		{"ToNegativeInf", Const, 5, ""},
+		{"ToPositiveInf", Const, 5, ""},
+		{"ToZero", Const, 5, ""},
+		{"Word", Type, 0, ""},
+	},
+	"math/bits": {
+		{"Add", Func, 12, "func(x uint, y uint, carry uint) (sum uint, carryOut uint)"},
+		{"Add32", Func, 12, "func(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)"},
+		{"Add64", Func, 12, "func(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)"},
+		{"Div", Func, 12, "func(hi uint, lo uint, y uint) (quo uint, rem uint)"},
+		{"Div32", Func, 12, "func(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)"},
+		{"Div64", Func, 12, "func(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)"},
+		{"LeadingZeros", Func, 9, "func(x uint) int"},
+		{"LeadingZeros16", Func, 9, "func(x uint16) int"},
+		{"LeadingZeros32", Func, 9, "func(x uint32) int"},
+		{"LeadingZeros64", Func, 9, "func(x uint64) int"},
+		{"LeadingZeros8", Func, 9, "func(x uint8) int"},
+		{"Len", Func, 9, "func(x uint) int"},
+		{"Len16", Func, 9, "func(x uint16) (n int)"},
+		{"Len32", Func, 9, "func(x uint32) (n int)"},
+		{"Len64", Func, 9, "func(x uint64) (n int)"},
+		{"Len8", Func, 9, "func(x uint8) int"},
+		{"Mul", Func, 12, "func(x uint, y uint) (hi uint, lo uint)"},
+		{"Mul32", Func, 12, "func(x uint32, y uint32) (hi uint32, lo uint32)"},
+		{"Mul64", Func, 12, "func(x uint64, y uint64) (hi uint64, lo uint64)"},
+		{"OnesCount", Func, 9, "func(x uint) int"},
+		{"OnesCount16", Func, 9, "func(x uint16) int"},
+		{"OnesCount32", Func, 9, "func(x uint32) int"},
+		{"OnesCount64", Func, 9, "func(x uint64) int"},
+		{"OnesCount8", Func, 9, "func(x uint8) int"},
+		{"Rem", Func, 14, "func(hi uint, lo uint, y uint) uint"},
+		{"Rem32", Func, 14, "func(hi uint32, lo uint32, y uint32) uint32"},
+		{"Rem64", Func, 14, "func(hi uint64, lo uint64, y uint64) uint64"},
+		{"Reverse", Func, 9, "func(x uint) uint"},
+		{"Reverse16", Func, 9, "func(x uint16) uint16"},
+		{"Reverse32", Func, 9, "func(x uint32) uint32"},
+		{"Reverse64", Func, 9, "func(x uint64) uint64"},
+		{"Reverse8", Func, 9, "func(x uint8) uint8"},
+		{"ReverseBytes", Func, 9, "func(x uint) uint"},
+		{"ReverseBytes16", Func, 9, "func(x uint16) uint16"},
+		{"ReverseBytes32", Func, 9, "func(x uint32) uint32"},
+		{"ReverseBytes64", Func, 9, "func(x uint64) uint64"},
+		{"RotateLeft", Func, 9, "func(x uint, k int) uint"},
+		{"RotateLeft16", Func, 9, "func(x uint16, k int) uint16"},
+		{"RotateLeft32", Func, 9, "func(x uint32, k int) uint32"},
+		{"RotateLeft64", Func, 9, "func(x uint64, k int) uint64"},
+		{"RotateLeft8", Func, 9, "func(x uint8, k int) uint8"},
+		{"Sub", Func, 12, "func(x uint, y uint, borrow uint) (diff uint, borrowOut uint)"},
+		{"Sub32", Func, 12, "func(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)"},
+		{"Sub64", Func, 12, "func(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)"},
+		{"TrailingZeros", Func, 9, "func(x uint) int"},
+		{"TrailingZeros16", Func, 9, "func(x uint16) int"},
+		{"TrailingZeros32", Func, 9, "func(x uint32) int"},
+		{"TrailingZeros64", Func, 9, "func(x uint64) int"},
+		{"TrailingZeros8", Func, 9, "func(x uint8) int"},
+		{"UintSize", Const, 9, ""},
+	},
+	"math/cmplx": {
+		{"Abs", Func, 0, "func(x complex128) float64"},
+		{"Acos", Func, 0, "func(x complex128) complex128"},
+		{"Acosh", Func, 0, "func(x complex128) complex128"},
+		{"Asin", Func, 0, "func(x complex128) complex128"},
+		{"Asinh", Func, 0, "func(x complex128) complex128"},
+		{"Atan", Func, 0, "func(x complex128) complex128"},
+		{"Atanh", Func, 0, "func(x complex128) complex128"},
+		{"Conj", Func, 0, "func(x complex128) complex128"},
+		{"Cos", Func, 0, "func(x complex128) complex128"},
+		{"Cosh", Func, 0, "func(x complex128) complex128"},
+		{"Cot", Func, 0, "func(x complex128) complex128"},
+		{"Exp", Func, 0, "func(x complex128) complex128"},
+		{"Inf", Func, 0, "func() complex128"},
+		{"IsInf", Func, 0, "func(x complex128) bool"},
+		{"IsNaN", Func, 0, "func(x complex128) bool"},
+		{"Log", Func, 0, "func(x complex128) complex128"},
+		{"Log10", Func, 0, "func(x complex128) complex128"},
+		{"NaN", Func, 0, "func() complex128"},
+		{"Phase", Func, 0, "func(x complex128) float64"},
+		{"Polar", Func, 0, "func(x complex128) (r float64, θ float64)"},
+		{"Pow", Func, 0, "func(x complex128, y complex128) complex128"},
+		{"Rect", Func, 0, "func(r float64, θ float64) complex128"},
+		{"Sin", Func, 0, "func(x complex128) complex128"},
+		{"Sinh", Func, 0, "func(x complex128) complex128"},
+		{"Sqrt", Func, 0, "func(x complex128) complex128"},
+		{"Tan", Func, 0, "func(x complex128) complex128"},
+		{"Tanh", Func, 0, "func(x complex128) complex128"},
+	},
+	"math/rand": {
+		{"(*Rand).ExpFloat64", Method, 0, ""},
+		{"(*Rand).Float32", Method, 0, ""},
+		{"(*Rand).Float64", Method, 0, ""},
+		{"(*Rand).Int", Method, 0, ""},
+		{"(*Rand).Int31", Method, 0, ""},
+		{"(*Rand).Int31n", Method, 0, ""},
+		{"(*Rand).Int63", Method, 0, ""},
+		{"(*Rand).Int63n", Method, 0, ""},
+		{"(*Rand).Intn", Method, 0, ""},
+		{"(*Rand).NormFloat64", Method, 0, ""},
+		{"(*Rand).Perm", Method, 0, ""},
+		{"(*Rand).Read", Method, 6, ""},
+		{"(*Rand).Seed", Method, 0, ""},
+		{"(*Rand).Shuffle", Method, 10, ""},
+		{"(*Rand).Uint32", Method, 0, ""},
+		{"(*Rand).Uint64", Method, 8, ""},
+		{"(*Zipf).Uint64", Method, 0, ""},
+		{"ExpFloat64", Func, 0, "func() float64"},
+		{"Float32", Func, 0, "func() float32"},
+		{"Float64", Func, 0, "func() float64"},
+		{"Int", Func, 0, "func() int"},
+		{"Int31", Func, 0, "func() int32"},
+		{"Int31n", Func, 0, "func(n int32) int32"},
+		{"Int63", Func, 0, "func() int64"},
+		{"Int63n", Func, 0, "func(n int64) int64"},
+		{"Intn", Func, 0, "func(n int) int"},
+		{"New", Func, 0, "func(src Source) *Rand"},
+		{"NewSource", Func, 0, "func(seed int64) Source"},
+		{"NewZipf", Func, 0, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
+		{"NormFloat64", Func, 0, "func() float64"},
+		{"Perm", Func, 0, "func(n int) []int"},
+		{"Rand", Type, 0, ""},
+		{"Read", Func, 6, "func(p []byte) (n int, err error)"},
+		{"Seed", Func, 0, "func(seed int64)"},
+		{"Shuffle", Func, 10, "func(n int, swap func(i int, j int))"},
+		{"Source", Type, 0, ""},
+		{"Source64", Type, 8, ""},
+		{"Uint32", Func, 0, "func() uint32"},
+		{"Uint64", Func, 8, "func() uint64"},
+		{"Zipf", Type, 0, ""},
+	},
+	"math/rand/v2": {
+		{"(*ChaCha8).AppendBinary", Method, 24, ""},
+		{"(*ChaCha8).MarshalBinary", Method, 22, ""},
+		{"(*ChaCha8).Read", Method, 23, ""},
+		{"(*ChaCha8).Seed", Method, 22, ""},
+		{"(*ChaCha8).Uint64", Method, 22, ""},
+		{"(*ChaCha8).UnmarshalBinary", Method, 22, ""},
+		{"(*PCG).AppendBinary", Method, 24, ""},
+		{"(*PCG).MarshalBinary", Method, 22, ""},
+		{"(*PCG).Seed", Method, 22, ""},
+		{"(*PCG).Uint64", Method, 22, ""},
+		{"(*PCG).UnmarshalBinary", Method, 22, ""},
+		{"(*Rand).ExpFloat64", Method, 22, ""},
+		{"(*Rand).Float32", Method, 22, ""},
+		{"(*Rand).Float64", Method, 22, ""},
+		{"(*Rand).Int", Method, 22, ""},
+		{"(*Rand).Int32", Method, 22, ""},
+		{"(*Rand).Int32N", Method, 22, ""},
+		{"(*Rand).Int64", Method, 22, ""},
+		{"(*Rand).Int64N", Method, 22, ""},
+		{"(*Rand).IntN", Method, 22, ""},
+		{"(*Rand).NormFloat64", Method, 22, ""},
+		{"(*Rand).Perm", Method, 22, ""},
+		{"(*Rand).Shuffle", Method, 22, ""},
+		{"(*Rand).Uint", Method, 23, ""},
+		{"(*Rand).Uint32", Method, 22, ""},
+		{"(*Rand).Uint32N", Method, 22, ""},
+		{"(*Rand).Uint64", Method, 22, ""},
+		{"(*Rand).Uint64N", Method, 22, ""},
+		{"(*Rand).UintN", Method, 22, ""},
+		{"(*Zipf).Uint64", Method, 22, ""},
+		{"ChaCha8", Type, 22, ""},
+		{"ExpFloat64", Func, 22, "func() float64"},
+		{"Float32", Func, 22, "func() float32"},
+		{"Float64", Func, 22, "func() float64"},
+		{"Int", Func, 22, "func() int"},
+		{"Int32", Func, 22, "func() int32"},
+		{"Int32N", Func, 22, "func(n int32) int32"},
+		{"Int64", Func, 22, "func() int64"},
+		{"Int64N", Func, 22, "func(n int64) int64"},
+		{"IntN", Func, 22, "func(n int) int"},
+		{"N", Func, 22, "func[Int intType](n Int) Int"},
+		{"New", Func, 22, "func(src Source) *Rand"},
+		{"NewChaCha8", Func, 22, "func(seed [32]byte) *ChaCha8"},
+		{"NewPCG", Func, 22, "func(seed1 uint64, seed2 uint64) *PCG"},
+		{"NewZipf", Func, 22, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
+		{"NormFloat64", Func, 22, "func() float64"},
+		{"PCG", Type, 22, ""},
+		{"Perm", Func, 22, "func(n int) []int"},
+		{"Rand", Type, 22, ""},
+		{"Shuffle", Func, 22, "func(n int, swap func(i int, j int))"},
+		{"Source", Type, 22, ""},
+		{"Uint", Func, 23, "func() uint"},
+		{"Uint32", Func, 22, "func() uint32"},
+		{"Uint32N", Func, 22, "func(n uint32) uint32"},
+		{"Uint64", Func, 22, "func() uint64"},
+		{"Uint64N", Func, 22, "func(n uint64) uint64"},
+		{"UintN", Func, 22, "func(n uint) uint"},
+		{"Zipf", Type, 22, ""},
+	},
+	"mime": {
+		{"(*WordDecoder).Decode", Method, 5, ""},
+		{"(*WordDecoder).DecodeHeader", Method, 5, ""},
+		{"(WordEncoder).Encode", Method, 5, ""},
+		{"AddExtensionType", Func, 0, "func(ext string, typ string) error"},
+		{"BEncoding", Const, 5, ""},
+		{"ErrInvalidMediaParameter", Var, 9, ""},
+		{"ExtensionsByType", Func, 5, "func(typ string) ([]string, error)"},
+		{"FormatMediaType", Func, 0, "func(t string, param map[string]string) string"},
+		{"ParseMediaType", Func, 0, "func(v string) (mediatype string, params map[string]string, err error)"},
+		{"QEncoding", Const, 5, ""},
+		{"TypeByExtension", Func, 0, "func(ext string) string"},
+		{"WordDecoder", Type, 5, ""},
+		{"WordDecoder.CharsetReader", Field, 5, ""},
+		{"WordEncoder", Type, 5, ""},
+	},
+	"mime/multipart": {
+		{"(*FileHeader).Open", Method, 0, ""},
+		{"(*Form).RemoveAll", Method, 0, ""},
+		{"(*Part).Close", Method, 0, ""},
+		{"(*Part).FileName", Method, 0, ""},
+		{"(*Part).FormName", Method, 0, ""},
+		{"(*Part).Read", Method, 0, ""},
+		{"(*Reader).NextPart", Method, 0, ""},
+		{"(*Reader).NextRawPart", Method, 14, ""},
+		{"(*Reader).ReadForm", Method, 0, ""},
+		{"(*Writer).Boundary", Method, 0, ""},
+		{"(*Writer).Close", Method, 0, ""},
+		{"(*Writer).CreateFormField", Method, 0, ""},
+		{"(*Writer).CreateFormFile", Method, 0, ""},
+		{"(*Writer).CreatePart", Method, 0, ""},
+		{"(*Writer).FormDataContentType", Method, 0, ""},
+		{"(*Writer).SetBoundary", Method, 1, ""},
+		{"(*Writer).WriteField", Method, 0, ""},
+		{"ErrMessageTooLarge", Var, 9, ""},
+		{"File", Type, 0, ""},
+		{"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
+		{"FileHeader", Type, 0, ""},
+		{"FileHeader.Filename", Field, 0, ""},
+		{"FileHeader.Header", Field, 0, ""},
+		{"FileHeader.Size", Field, 9, ""},
+		{"Form", Type, 0, ""},
+		{"Form.File", Field, 0, ""},
+		{"Form.Value", Field, 0, ""},
+		{"NewReader", Func, 0, "func(r io.Reader, boundary string) *Reader"},
+		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+		{"Part", Type, 0, ""},
+		{"Part.Header", Field, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"mime/quotedprintable": {
+		{"(*Reader).Read", Method, 5, ""},
+		{"(*Writer).Close", Method, 5, ""},
+		{"(*Writer).Write", Method, 5, ""},
+		{"NewReader", Func, 5, "func(r io.Reader) *Reader"},
+		{"NewWriter", Func, 5, "func(w io.Writer) *Writer"},
+		{"Reader", Type, 5, ""},
+		{"Writer", Type, 5, ""},
+		{"Writer.Binary", Field, 5, ""},
+	},
+	"net": {
+		{"(*AddrError).Error", Method, 0, ""},
+		{"(*AddrError).Temporary", Method, 0, ""},
+		{"(*AddrError).Timeout", Method, 0, ""},
+		{"(*Buffers).Read", Method, 8, ""},
+		{"(*Buffers).WriteTo", Method, 8, ""},
+		{"(*DNSConfigError).Error", Method, 0, ""},
+		{"(*DNSConfigError).Temporary", Method, 0, ""},
+		{"(*DNSConfigError).Timeout", Method, 0, ""},
+		{"(*DNSConfigError).Unwrap", Method, 13, ""},
+		{"(*DNSError).Error", Method, 0, ""},
+		{"(*DNSError).Temporary", Method, 0, ""},
+		{"(*DNSError).Timeout", Method, 0, ""},
+		{"(*DNSError).Unwrap", Method, 23, ""},
+		{"(*Dialer).Dial", Method, 1, ""},
+		{"(*Dialer).DialContext", Method, 7, ""},
+		{"(*Dialer).DialIP", Method, 26, ""},
+		{"(*Dialer).DialTCP", Method, 26, ""},
+		{"(*Dialer).DialUDP", Method, 26, ""},
+		{"(*Dialer).DialUnix", Method, 26, ""},
+		{"(*Dialer).MultipathTCP", Method, 21, ""},
+		{"(*Dialer).SetMultipathTCP", Method, 21, ""},
+		{"(*IP).UnmarshalText", Method, 2, ""},
+		{"(*IPAddr).Network", Method, 0, ""},
+		{"(*IPAddr).String", Method, 0, ""},
+		{"(*IPConn).Close", Method, 0, ""},
+		{"(*IPConn).File", Method, 0, ""},
+		{"(*IPConn).LocalAddr", Method, 0, ""},
+		{"(*IPConn).Read", Method, 0, ""},
+		{"(*IPConn).ReadFrom", Method, 0, ""},
+		{"(*IPConn).ReadFromIP", Method, 0, ""},
+		{"(*IPConn).ReadMsgIP", Method, 1, ""},
+		{"(*IPConn).RemoteAddr", Method, 0, ""},
+		{"(*IPConn).SetDeadline", Method, 0, ""},
+		{"(*IPConn).SetReadBuffer", Method, 0, ""},
+		{"(*IPConn).SetReadDeadline", Method, 0, ""},
+		{"(*IPConn).SetWriteBuffer", Method, 0, ""},
+		{"(*IPConn).SetWriteDeadline", Method, 0, ""},
+		{"(*IPConn).SyscallConn", Method, 9, ""},
+		{"(*IPConn).Write", Method, 0, ""},
+		{"(*IPConn).WriteMsgIP", Method, 1, ""},
+		{"(*IPConn).WriteTo", Method, 0, ""},
+		{"(*IPConn).WriteToIP", Method, 0, ""},
+		{"(*IPNet).Contains", Method, 0, ""},
+		{"(*IPNet).Network", Method, 0, ""},
+		{"(*IPNet).String", Method, 0, ""},
+		{"(*Interface).Addrs", Method, 0, ""},
+		{"(*Interface).MulticastAddrs", Method, 0, ""},
+		{"(*ListenConfig).Listen", Method, 11, ""},
+		{"(*ListenConfig).ListenPacket", Method, 11, ""},
+		{"(*ListenConfig).MultipathTCP", Method, 21, ""},
+		{"(*ListenConfig).SetMultipathTCP", Method, 21, ""},
+		{"(*OpError).Error", Method, 0, ""},
+		{"(*OpError).Temporary", Method, 0, ""},
+		{"(*OpError).Timeout", Method, 0, ""},
+		{"(*OpError).Unwrap", Method, 13, ""},
+		{"(*ParseError).Error", Method, 0, ""},
+		{"(*ParseError).Temporary", Method, 17, ""},
+		{"(*ParseError).Timeout", Method, 17, ""},
+		{"(*Resolver).LookupAddr", Method, 8, ""},
+		{"(*Resolver).LookupCNAME", Method, 8, ""},
+		{"(*Resolver).LookupHost", Method, 8, ""},
+		{"(*Resolver).LookupIP", Method, 15, ""},
+		{"(*Resolver).LookupIPAddr", Method, 8, ""},
+		{"(*Resolver).LookupMX", Method, 8, ""},
+		{"(*Resolver).LookupNS", Method, 8, ""},
+		{"(*Resolver).LookupNetIP", Method, 18, ""},
+		{"(*Resolver).LookupPort", Method, 8, ""},
+		{"(*Resolver).LookupSRV", Method, 8, ""},
+		{"(*Resolver).LookupTXT", Method, 8, ""},
+		{"(*TCPAddr).AddrPort", Method, 18, ""},
+		{"(*TCPAddr).Network", Method, 0, ""},
+		{"(*TCPAddr).String", Method, 0, ""},
+		{"(*TCPConn).Close", Method, 0, ""},
+		{"(*TCPConn).CloseRead", Method, 0, ""},
+		{"(*TCPConn).CloseWrite", Method, 0, ""},
+		{"(*TCPConn).File", Method, 0, ""},
+		{"(*TCPConn).LocalAddr", Method, 0, ""},
+		{"(*TCPConn).MultipathTCP", Method, 21, ""},
+		{"(*TCPConn).Read", Method, 0, ""},
+		{"(*TCPConn).ReadFrom", Method, 0, ""},
+		{"(*TCPConn).RemoteAddr", Method, 0, ""},
+		{"(*TCPConn).SetDeadline", Method, 0, ""},
+		{"(*TCPConn).SetKeepAlive", Method, 0, ""},
+		{"(*TCPConn).SetKeepAliveConfig", Method, 23, ""},
+		{"(*TCPConn).SetKeepAlivePeriod", Method, 2, ""},
+		{"(*TCPConn).SetLinger", Method, 0, ""},
+		{"(*TCPConn).SetNoDelay", Method, 0, ""},
+		{"(*TCPConn).SetReadBuffer", Method, 0, ""},
+		{"(*TCPConn).SetReadDeadline", Method, 0, ""},
+		{"(*TCPConn).SetWriteBuffer", Method, 0, ""},
+		{"(*TCPConn).SetWriteDeadline", Method, 0, ""},
+		{"(*TCPConn).SyscallConn", Method, 9, ""},
+		{"(*TCPConn).Write", Method, 0, ""},
+		{"(*TCPConn).WriteTo", Method, 22, ""},
+		{"(*TCPListener).Accept", Method, 0, ""},
+		{"(*TCPListener).AcceptTCP", Method, 0, ""},
+		{"(*TCPListener).Addr", Method, 0, ""},
+		{"(*TCPListener).Close", Method, 0, ""},
+		{"(*TCPListener).File", Method, 0, ""},
+		{"(*TCPListener).SetDeadline", Method, 0, ""},
+		{"(*TCPListener).SyscallConn", Method, 10, ""},
+		{"(*UDPAddr).AddrPort", Method, 18, ""},
+		{"(*UDPAddr).Network", Method, 0, ""},
+		{"(*UDPAddr).String", Method, 0, ""},
+		{"(*UDPConn).Close", Method, 0, ""},
+		{"(*UDPConn).File", Method, 0, ""},
+		{"(*UDPConn).LocalAddr", Method, 0, ""},
+		{"(*UDPConn).Read", Method, 0, ""},
+		{"(*UDPConn).ReadFrom", Method, 0, ""},
+		{"(*UDPConn).ReadFromUDP", Method, 0, ""},
+		{"(*UDPConn).ReadFromUDPAddrPort", Method, 18, ""},
+		{"(*UDPConn).ReadMsgUDP", Method, 1, ""},
+		{"(*UDPConn).ReadMsgUDPAddrPort", Method, 18, ""},
+		{"(*UDPConn).RemoteAddr", Method, 0, ""},
+		{"(*UDPConn).SetDeadline", Method, 0, ""},
+		{"(*UDPConn).SetReadBuffer", Method, 0, ""},
+		{"(*UDPConn).SetReadDeadline", Method, 0, ""},
+		{"(*UDPConn).SetWriteBuffer", Method, 0, ""},
+		{"(*UDPConn).SetWriteDeadline", Method, 0, ""},
+		{"(*UDPConn).SyscallConn", Method, 9, ""},
+		{"(*UDPConn).Write", Method, 0, ""},
+		{"(*UDPConn).WriteMsgUDP", Method, 1, ""},
+		{"(*UDPConn).WriteMsgUDPAddrPort", Method, 18, ""},
+		{"(*UDPConn).WriteTo", Method, 0, ""},
+		{"(*UDPConn).WriteToUDP", Method, 0, ""},
+		{"(*UDPConn).WriteToUDPAddrPort", Method, 18, ""},
+		{"(*UnixAddr).Network", Method, 0, ""},
+		{"(*UnixAddr).String", Method, 0, ""},
+		{"(*UnixConn).Close", Method, 0, ""},
+		{"(*UnixConn).CloseRead", Method, 1, ""},
+		{"(*UnixConn).CloseWrite", Method, 1, ""},
+		{"(*UnixConn).File", Method, 0, ""},
+		{"(*UnixConn).LocalAddr", Method, 0, ""},
+		{"(*UnixConn).Read", Method, 0, ""},
+		{"(*UnixConn).ReadFrom", Method, 0, ""},
+		{"(*UnixConn).ReadFromUnix", Method, 0, ""},
+		{"(*UnixConn).ReadMsgUnix", Method, 0, ""},
+		{"(*UnixConn).RemoteAddr", Method, 0, ""},
+		{"(*UnixConn).SetDeadline", Method, 0, ""},
+		{"(*UnixConn).SetReadBuffer", Method, 0, ""},
+		{"(*UnixConn).SetReadDeadline", Method, 0, ""},
+		{"(*UnixConn).SetWriteBuffer", Method, 0, ""},
+		{"(*UnixConn).SetWriteDeadline", Method, 0, ""},
+		{"(*UnixConn).SyscallConn", Method, 9, ""},
+		{"(*UnixConn).Write", Method, 0, ""},
+		{"(*UnixConn).WriteMsgUnix", Method, 0, ""},
+		{"(*UnixConn).WriteTo", Method, 0, ""},
+		{"(*UnixConn).WriteToUnix", Method, 0, ""},
+		{"(*UnixListener).Accept", Method, 0, ""},
+		{"(*UnixListener).AcceptUnix", Method, 0, ""},
+		{"(*UnixListener).Addr", Method, 0, ""},
+		{"(*UnixListener).Close", Method, 0, ""},
+		{"(*UnixListener).File", Method, 0, ""},
+		{"(*UnixListener).SetDeadline", Method, 0, ""},
+		{"(*UnixListener).SetUnlinkOnClose", Method, 8, ""},
+		{"(*UnixListener).SyscallConn", Method, 10, ""},
+		{"(Flags).String", Method, 0, ""},
+		{"(HardwareAddr).String", Method, 0, ""},
+		{"(IP).AppendText", Method, 24, ""},
+		{"(IP).DefaultMask", Method, 0, ""},
+		{"(IP).Equal", Method, 0, ""},
+		{"(IP).IsGlobalUnicast", Method, 0, ""},
+		{"(IP).IsInterfaceLocalMulticast", Method, 0, ""},
+		{"(IP).IsLinkLocalMulticast", Method, 0, ""},
+		{"(IP).IsLinkLocalUnicast", Method, 0, ""},
+		{"(IP).IsLoopback", Method, 0, ""},
+		{"(IP).IsMulticast", Method, 0, ""},
+		{"(IP).IsPrivate", Method, 17, ""},
+		{"(IP).IsUnspecified", Method, 0, ""},
+		{"(IP).MarshalText", Method, 2, ""},
+		{"(IP).Mask", Method, 0, ""},
+		{"(IP).String", Method, 0, ""},
+		{"(IP).To16", Method, 0, ""},
+		{"(IP).To4", Method, 0, ""},
+		{"(IPMask).Size", Method, 0, ""},
+		{"(IPMask).String", Method, 0, ""},
+		{"(InvalidAddrError).Error", Method, 0, ""},
+		{"(InvalidAddrError).Temporary", Method, 0, ""},
+		{"(InvalidAddrError).Timeout", Method, 0, ""},
+		{"(UnknownNetworkError).Error", Method, 0, ""},
+		{"(UnknownNetworkError).Temporary", Method, 0, ""},
+		{"(UnknownNetworkError).Timeout", Method, 0, ""},
+		{"Addr", Type, 0, ""},
+		{"AddrError", Type, 0, ""},
+		{"AddrError.Addr", Field, 0, ""},
+		{"AddrError.Err", Field, 0, ""},
+		{"Buffers", Type, 8, ""},
+		{"CIDRMask", Func, 0, "func(ones int, bits int) IPMask"},
+		{"Conn", Type, 0, ""},
+		{"DNSConfigError", Type, 0, ""},
+		{"DNSConfigError.Err", Field, 0, ""},
+		{"DNSError", Type, 0, ""},
+		{"DNSError.Err", Field, 0, ""},
+		{"DNSError.IsNotFound", Field, 13, ""},
+		{"DNSError.IsTemporary", Field, 6, ""},
+		{"DNSError.IsTimeout", Field, 0, ""},
+		{"DNSError.Name", Field, 0, ""},
+		{"DNSError.Server", Field, 0, ""},
+		{"DNSError.UnwrapErr", Field, 23, ""},
+		{"DefaultResolver", Var, 8, ""},
+		{"Dial", Func, 0, "func(network string, address string) (Conn, error)"},
+		{"DialIP", Func, 0, "func(network string, laddr *IPAddr, raddr *IPAddr) (*IPConn, error)"},
+		{"DialTCP", Func, 0, "func(network string, laddr *TCPAddr, raddr *TCPAddr) (*TCPConn, error)"},
+		{"DialTimeout", Func, 0, "func(network string, address string, timeout time.Duration) (Conn, error)"},
+		{"DialUDP", Func, 0, "func(network string, laddr *UDPAddr, raddr *UDPAddr) (*UDPConn, error)"},
+		{"DialUnix", Func, 0, "func(network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error)"},
+		{"Dialer", Type, 1, ""},
+		{"Dialer.Cancel", Field, 6, ""},
+		{"Dialer.Control", Field, 11, ""},
+		{"Dialer.ControlContext", Field, 20, ""},
+		{"Dialer.Deadline", Field, 1, ""},
+		{"Dialer.DualStack", Field, 2, ""},
+		{"Dialer.FallbackDelay", Field, 5, ""},
+		{"Dialer.KeepAlive", Field, 3, ""},
+		{"Dialer.KeepAliveConfig", Field, 23, ""},
+		{"Dialer.LocalAddr", Field, 1, ""},
+		{"Dialer.Resolver", Field, 8, ""},
+		{"Dialer.Timeout", Field, 1, ""},
+		{"ErrClosed", Var, 16, ""},
+		{"ErrWriteToConnected", Var, 0, ""},
+		{"Error", Type, 0, ""},
+		{"FileConn", Func, 0, "func(f *os.File) (c Conn, err error)"},
+		{"FileListener", Func, 0, "func(f *os.File) (ln Listener, err error)"},
+		{"FilePacketConn", Func, 0, "func(f *os.File) (c PacketConn, err error)"},
+		{"FlagBroadcast", Const, 0, ""},
+		{"FlagLoopback", Const, 0, ""},
+		{"FlagMulticast", Const, 0, ""},
+		{"FlagPointToPoint", Const, 0, ""},
+		{"FlagRunning", Const, 20, ""},
+		{"FlagUp", Const, 0, ""},
+		{"Flags", Type, 0, ""},
+		{"HardwareAddr", Type, 0, ""},
+		{"IP", Type, 0, ""},
+		{"IPAddr", Type, 0, ""},
+		{"IPAddr.IP", Field, 0, ""},
+		{"IPAddr.Zone", Field, 1, ""},
+		{"IPConn", Type, 0, ""},
+		{"IPMask", Type, 0, ""},
+		{"IPNet", Type, 0, ""},
+		{"IPNet.IP", Field, 0, ""},
+		{"IPNet.Mask", Field, 0, ""},
+		{"IPv4", Func, 0, "func(a byte, b byte, c byte, d byte) IP"},
+		{"IPv4Mask", Func, 0, "func(a byte, b byte, c byte, d byte) IPMask"},
+		{"IPv4allrouter", Var, 0, ""},
+		{"IPv4allsys", Var, 0, ""},
+		{"IPv4bcast", Var, 0, ""},
+		{"IPv4len", Const, 0, ""},
+		{"IPv4zero", Var, 0, ""},
+		{"IPv6interfacelocalallnodes", Var, 0, ""},
+		{"IPv6len", Const, 0, ""},
+		{"IPv6linklocalallnodes", Var, 0, ""},
+		{"IPv6linklocalallrouters", Var, 0, ""},
+		{"IPv6loopback", Var, 0, ""},
+		{"IPv6unspecified", Var, 0, ""},
+		{"IPv6zero", Var, 0, ""},
+		{"Interface", Type, 0, ""},
+		{"Interface.Flags", Field, 0, ""},
+		{"Interface.HardwareAddr", Field, 0, ""},
+		{"Interface.Index", Field, 0, ""},
+		{"Interface.MTU", Field, 0, ""},
+		{"Interface.Name", Field, 0, ""},
+		{"InterfaceAddrs", Func, 0, "func() ([]Addr, error)"},
+		{"InterfaceByIndex", Func, 0, "func(index int) (*Interface, error)"},
+		{"InterfaceByName", Func, 0, "func(name string) (*Interface, error)"},
+		{"Interfaces", Func, 0, "func() ([]Interface, error)"},
+		{"InvalidAddrError", Type, 0, ""},
+		{"JoinHostPort", Func, 0, "func(host string, port string) string"},
+		{"KeepAliveConfig", Type, 23, ""},
+		{"KeepAliveConfig.Count", Field, 23, ""},
+		{"KeepAliveConfig.Enable", Field, 23, ""},
+		{"KeepAliveConfig.Idle", Field, 23, ""},
+		{"KeepAliveConfig.Interval", Field, 23, ""},
+		{"Listen", Func, 0, "func(network string, address string) (Listener, error)"},
+		{"ListenConfig", Type, 11, ""},
+		{"ListenConfig.Control", Field, 11, ""},
+		{"ListenConfig.KeepAlive", Field, 13, ""},
+		{"ListenConfig.KeepAliveConfig", Field, 23, ""},
+		{"ListenIP", Func, 0, "func(network string, laddr *IPAddr) (*IPConn, error)"},
+		{"ListenMulticastUDP", Func, 0, "func(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error)"},
+		{"ListenPacket", Func, 0, "func(network string, address string) (PacketConn, error)"},
+		{"ListenTCP", Func, 0, "func(network string, laddr *TCPAddr) (*TCPListener, error)"},
+		{"ListenUDP", Func, 0, "func(network string, laddr *UDPAddr) (*UDPConn, error)"},
+		{"ListenUnix", Func, 0, "func(network string, laddr *UnixAddr) (*UnixListener, error)"},
+		{"ListenUnixgram", Func, 0, "func(network string, laddr *UnixAddr) (*UnixConn, error)"},
+		{"Listener", Type, 0, ""},
+		{"LookupAddr", Func, 0, "func(addr string) (names []string, err error)"},
+		{"LookupCNAME", Func, 0, "func(host string) (cname string, err error)"},
+		{"LookupHost", Func, 0, "func(host string) (addrs []string, err error)"},
+		{"LookupIP", Func, 0, "func(host string) ([]IP, error)"},
+		{"LookupMX", Func, 0, "func(name string) ([]*MX, error)"},
+		{"LookupNS", Func, 1, "func(name string) ([]*NS, error)"},
+		{"LookupPort", Func, 0, "func(network string, service string) (port int, err error)"},
+		{"LookupSRV", Func, 0, "func(service string, proto string, name string) (cname string, addrs []*SRV, err error)"},
+		{"LookupTXT", Func, 0, "func(name string) ([]string, error)"},
+		{"MX", Type, 0, ""},
+		{"MX.Host", Field, 0, ""},
+		{"MX.Pref", Field, 0, ""},
+		{"NS", Type, 1, ""},
+		{"NS.Host", Field, 1, ""},
+		{"OpError", Type, 0, ""},
+		{"OpError.Addr", Field, 0, ""},
+		{"OpError.Err", Field, 0, ""},
+		{"OpError.Net", Field, 0, ""},
+		{"OpError.Op", Field, 0, ""},
+		{"OpError.Source", Field, 5, ""},
+		{"PacketConn", Type, 0, ""},
+		{"ParseCIDR", Func, 0, "func(s string) (IP, *IPNet, error)"},
+		{"ParseError", Type, 0, ""},
+		{"ParseError.Text", Field, 0, ""},
+		{"ParseError.Type", Field, 0, ""},
+		{"ParseIP", Func, 0, "func(s string) IP"},
+		{"ParseMAC", Func, 0, "func(s string) (hw HardwareAddr, err error)"},
+		{"Pipe", Func, 0, "func() (Conn, Conn)"},
+		{"ResolveIPAddr", Func, 0, "func(network string, address string) (*IPAddr, error)"},
+		{"ResolveTCPAddr", Func, 0, "func(network string, address string) (*TCPAddr, error)"},
+		{"ResolveUDPAddr", Func, 0, "func(network string, address string) (*UDPAddr, error)"},
+		{"ResolveUnixAddr", Func, 0, "func(network string, address string) (*UnixAddr, error)"},
+		{"Resolver", Type, 8, ""},
+		{"Resolver.Dial", Field, 9, ""},
+		{"Resolver.PreferGo", Field, 8, ""},
+		{"Resolver.StrictErrors", Field, 9, ""},
+		{"SRV", Type, 0, ""},
+		{"SRV.Port", Field, 0, ""},
+		{"SRV.Priority", Field, 0, ""},
+		{"SRV.Target", Field, 0, ""},
+		{"SRV.Weight", Field, 0, ""},
+		{"SplitHostPort", Func, 0, "func(hostport string) (host string, port string, err error)"},
+		{"TCPAddr", Type, 0, ""},
+		{"TCPAddr.IP", Field, 0, ""},
+		{"TCPAddr.Port", Field, 0, ""},
+		{"TCPAddr.Zone", Field, 1, ""},
+		{"TCPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *TCPAddr"},
+		{"TCPConn", Type, 0, ""},
+		{"TCPListener", Type, 0, ""},
+		{"UDPAddr", Type, 0, ""},
+		{"UDPAddr.IP", Field, 0, ""},
+		{"UDPAddr.Port", Field, 0, ""},
+		{"UDPAddr.Zone", Field, 1, ""},
+		{"UDPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *UDPAddr"},
+		{"UDPConn", Type, 0, ""},
+		{"UnixAddr", Type, 0, ""},
+		{"UnixAddr.Name", Field, 0, ""},
+		{"UnixAddr.Net", Field, 0, ""},
+		{"UnixConn", Type, 0, ""},
+		{"UnixListener", Type, 0, ""},
+		{"UnknownNetworkError", Type, 0, ""},
+	},
+	"net/http": {
+		{"(*Client).CloseIdleConnections", Method, 12, ""},
+		{"(*Client).Do", Method, 0, ""},
+		{"(*Client).Get", Method, 0, ""},
+		{"(*Client).Head", Method, 0, ""},
+		{"(*Client).Post", Method, 0, ""},
+		{"(*Client).PostForm", Method, 0, ""},
+		{"(*Cookie).String", Method, 0, ""},
+		{"(*Cookie).Valid", Method, 18, ""},
+		{"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
+		{"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""},
+		{"(*CrossOriginProtection).Check", Method, 25, ""},
+		{"(*CrossOriginProtection).Handler", Method, 25, ""},
+		{"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""},
+		{"(*MaxBytesError).Error", Method, 19, ""},
+		{"(*ProtocolError).Error", Method, 0, ""},
+		{"(*ProtocolError).Is", Method, 21, ""},
+		{"(*Protocols).SetHTTP1", Method, 24, ""},
+		{"(*Protocols).SetHTTP2", Method, 24, ""},
+		{"(*Protocols).SetUnencryptedHTTP2", Method, 24, ""},
+		{"(*Request).AddCookie", Method, 0, ""},
+		{"(*Request).BasicAuth", Method, 4, ""},
+		{"(*Request).Clone", Method, 13, ""},
+		{"(*Request).Context", Method, 7, ""},
+		{"(*Request).Cookie", Method, 0, ""},
+		{"(*Request).Cookies", Method, 0, ""},
+		{"(*Request).CookiesNamed", Method, 23, ""},
+		{"(*Request).FormFile", Method, 0, ""},
+		{"(*Request).FormValue", Method, 0, ""},
+		{"(*Request).MultipartReader", Method, 0, ""},
+		{"(*Request).ParseForm", Method, 0, ""},
+		{"(*Request).ParseMultipartForm", Method, 0, ""},
+		{"(*Request).PathValue", Method, 22, ""},
+		{"(*Request).PostFormValue", Method, 1, ""},
+		{"(*Request).ProtoAtLeast", Method, 0, ""},
+		{"(*Request).Referer", Method, 0, ""},
+		{"(*Request).SetBasicAuth", Method, 0, ""},
+		{"(*Request).SetPathValue", Method, 22, ""},
+		{"(*Request).UserAgent", Method, 0, ""},
+		{"(*Request).WithContext", Method, 7, ""},
+		{"(*Request).Write", Method, 0, ""},
+		{"(*Request).WriteProxy", Method, 0, ""},
+		{"(*Response).Cookies", Method, 0, ""},
+		{"(*Response).Location", Method, 0, ""},
+		{"(*Response).ProtoAtLeast", Method, 0, ""},
+		{"(*Response).Write", Method, 0, ""},
+		{"(*ResponseController).EnableFullDuplex", Method, 21, ""},
+		{"(*ResponseController).Flush", Method, 20, ""},
+		{"(*ResponseController).Hijack", Method, 20, ""},
+		{"(*ResponseController).SetReadDeadline", Method, 20, ""},
+		{"(*ResponseController).SetWriteDeadline", Method, 20, ""},
+		{"(*ServeMux).Handle", Method, 0, ""},
+		{"(*ServeMux).HandleFunc", Method, 0, ""},
+		{"(*ServeMux).Handler", Method, 1, ""},
+		{"(*ServeMux).ServeHTTP", Method, 0, ""},
+		{"(*Server).Close", Method, 8, ""},
+		{"(*Server).ListenAndServe", Method, 0, ""},
+		{"(*Server).ListenAndServeTLS", Method, 0, ""},
+		{"(*Server).RegisterOnShutdown", Method, 9, ""},
+		{"(*Server).Serve", Method, 0, ""},
+		{"(*Server).ServeTLS", Method, 9, ""},
+		{"(*Server).SetKeepAlivesEnabled", Method, 3, ""},
+		{"(*Server).Shutdown", Method, 8, ""},
+		{"(*Transport).CancelRequest", Method, 1, ""},
+		{"(*Transport).Clone", Method, 13, ""},
+		{"(*Transport).CloseIdleConnections", Method, 0, ""},
+		{"(*Transport).RegisterProtocol", Method, 0, ""},
+		{"(*Transport).RoundTrip", Method, 0, ""},
+		{"(ConnState).String", Method, 3, ""},
+		{"(Dir).Open", Method, 0, ""},
+		{"(HandlerFunc).ServeHTTP", Method, 0, ""},
+		{"(Header).Add", Method, 0, ""},
+		{"(Header).Clone", Method, 13, ""},
+		{"(Header).Del", Method, 0, ""},
+		{"(Header).Get", Method, 0, ""},
+		{"(Header).Set", Method, 0, ""},
+		{"(Header).Values", Method, 14, ""},
+		{"(Header).Write", Method, 0, ""},
+		{"(Header).WriteSubset", Method, 0, ""},
+		{"(Protocols).HTTP1", Method, 24, ""},
+		{"(Protocols).HTTP2", Method, 24, ""},
+		{"(Protocols).String", Method, 24, ""},
+		{"(Protocols).UnencryptedHTTP2", Method, 24, ""},
+		{"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"},
+		{"CanonicalHeaderKey", Func, 0, "func(s string) string"},
+		{"Client", Type, 0, ""},
+		{"Client.CheckRedirect", Field, 0, ""},
+		{"Client.Jar", Field, 0, ""},
+		{"Client.Timeout", Field, 3, ""},
+		{"Client.Transport", Field, 0, ""},
+		{"CloseNotifier", Type, 1, ""},
+		{"ConnState", Type, 3, ""},
+		{"Cookie", Type, 0, ""},
+		{"Cookie.Domain", Field, 0, ""},
+		{"Cookie.Expires", Field, 0, ""},
+		{"Cookie.HttpOnly", Field, 0, ""},
+		{"Cookie.MaxAge", Field, 0, ""},
+		{"Cookie.Name", Field, 0, ""},
+		{"Cookie.Partitioned", Field, 23, ""},
+		{"Cookie.Path", Field, 0, ""},
+		{"Cookie.Quoted", Field, 23, ""},
+		{"Cookie.Raw", Field, 0, ""},
+		{"Cookie.RawExpires", Field, 0, ""},
+		{"Cookie.SameSite", Field, 11, ""},
+		{"Cookie.Secure", Field, 0, ""},
+		{"Cookie.Unparsed", Field, 0, ""},
+		{"Cookie.Value", Field, 0, ""},
+		{"CookieJar", Type, 0, ""},
+		{"CrossOriginProtection", Type, 25, ""},
+		{"DefaultClient", Var, 0, ""},
+		{"DefaultMaxHeaderBytes", Const, 0, ""},
+		{"DefaultMaxIdleConnsPerHost", Const, 0, ""},
+		{"DefaultServeMux", Var, 0, ""},
+		{"DefaultTransport", Var, 0, ""},
+		{"DetectContentType", Func, 0, "func(data []byte) string"},
+		{"Dir", Type, 0, ""},
+		{"ErrAbortHandler", Var, 8, ""},
+		{"ErrBodyNotAllowed", Var, 0, ""},
+		{"ErrBodyReadAfterClose", Var, 0, ""},
+		{"ErrContentLength", Var, 0, ""},
+		{"ErrHandlerTimeout", Var, 0, ""},
+		{"ErrHeaderTooLong", Var, 0, ""},
+		{"ErrHijacked", Var, 0, ""},
+		{"ErrLineTooLong", Var, 0, ""},
+		{"ErrMissingBoundary", Var, 0, ""},
+		{"ErrMissingContentLength", Var, 0, ""},
+		{"ErrMissingFile", Var, 0, ""},
+		{"ErrNoCookie", Var, 0, ""},
+		{"ErrNoLocation", Var, 0, ""},
+		{"ErrNotMultipart", Var, 0, ""},
+		{"ErrNotSupported", Var, 0, ""},
+		{"ErrSchemeMismatch", Var, 21, ""},
+		{"ErrServerClosed", Var, 8, ""},
+		{"ErrShortBody", Var, 0, ""},
+		{"ErrSkipAltProtocol", Var, 6, ""},
+		{"ErrUnexpectedTrailer", Var, 0, ""},
+		{"ErrUseLastResponse", Var, 7, ""},
+		{"ErrWriteAfterFlush", Var, 0, ""},
+		{"Error", Func, 0, "func(w ResponseWriter, error string, code int)"},
+		{"FS", Func, 16, "func(fsys fs.FS) FileSystem"},
+		{"File", Type, 0, ""},
+		{"FileServer", Func, 0, "func(root FileSystem) Handler"},
+		{"FileServerFS", Func, 22, "func(root fs.FS) Handler"},
+		{"FileSystem", Type, 0, ""},
+		{"Flusher", Type, 0, ""},
+		{"Get", Func, 0, "func(url string) (resp *Response, err error)"},
+		{"HTTP2Config", Type, 24, ""},
+		{"HTTP2Config.CountError", Field, 24, ""},
+		{"HTTP2Config.MaxConcurrentStreams", Field, 24, ""},
+		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24, ""},
+		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24, ""},
+		{"HTTP2Config.MaxReadFrameSize", Field, 24, ""},
+		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24, ""},
+		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24, ""},
+		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""},
+		{"HTTP2Config.PingTimeout", Field, 24, ""},
+		{"HTTP2Config.SendPingTimeout", Field, 24, ""},
+		{"HTTP2Config.StrictMaxConcurrentRequests", Field, 26, ""},
+		{"HTTP2Config.WriteByteTimeout", Field, 24, ""},
+		{"Handle", Func, 0, "func(pattern string, handler Handler)"},
+		{"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"},
+		{"Handler", Type, 0, ""},
+		{"HandlerFunc", Type, 0, ""},
+		{"Head", Func, 0, "func(url string) (resp *Response, err error)"},
+		{"Header", Type, 0, ""},
+		{"Hijacker", Type, 0, ""},
+		{"ListenAndServe", Func, 0, "func(addr string, handler Handler) error"},
+		{"ListenAndServeTLS", Func, 0, "func(addr string, certFile string, keyFile string, handler Handler) error"},
+		{"LocalAddrContextKey", Var, 7, ""},
+		{"MaxBytesError", Type, 19, ""},
+		{"MaxBytesError.Limit", Field, 19, ""},
+		{"MaxBytesHandler", Func, 18, "func(h Handler, n int64) Handler"},
+		{"MaxBytesReader", Func, 0, "func(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser"},
+		{"MethodConnect", Const, 6, ""},
+		{"MethodDelete", Const, 6, ""},
+		{"MethodGet", Const, 6, ""},
+		{"MethodHead", Const, 6, ""},
+		{"MethodOptions", Const, 6, ""},
+		{"MethodPatch", Const, 6, ""},
+		{"MethodPost", Const, 6, ""},
+		{"MethodPut", Const, 6, ""},
+		{"MethodTrace", Const, 6, ""},
+		{"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"},
+		{"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"},
+		{"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"},
+		{"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"},
+		{"NewRequestWithContext", Func, 13, "func(ctx context.Context, method string, url string, body io.Reader) (*Request, error)"},
+		{"NewResponseController", Func, 20, "func(rw ResponseWriter) *ResponseController"},
+		{"NewServeMux", Func, 0, "func() *ServeMux"},
+		{"NoBody", Var, 8, ""},
+		{"NotFound", Func, 0, "func(w ResponseWriter, r *Request)"},
+		{"NotFoundHandler", Func, 0, "func() Handler"},
+		{"ParseCookie", Func, 23, "func(line string) ([]*Cookie, error)"},
+		{"ParseHTTPVersion", Func, 0, "func(vers string) (major int, minor int, ok bool)"},
+		{"ParseSetCookie", Func, 23, "func(line string) (*Cookie, error)"},
+		{"ParseTime", Func, 1, "func(text string) (t time.Time, err error)"},
+		{"Post", Func, 0, "func(url string, contentType string, body io.Reader) (resp *Response, err error)"},
+		{"PostForm", Func, 0, "func(url string, data url.Values) (resp *Response, err error)"},
+		{"ProtocolError", Type, 0, ""},
+		{"ProtocolError.ErrorString", Field, 0, ""},
+		{"Protocols", Type, 24, ""},
+		{"ProxyFromEnvironment", Func, 0, "func(req *Request) (*url.URL, error)"},
+		{"ProxyURL", Func, 0, "func(fixedURL *url.URL) func(*Request) (*url.URL, error)"},
+		{"PushOptions", Type, 8, ""},
+		{"PushOptions.Header", Field, 8, ""},
+		{"PushOptions.Method", Field, 8, ""},
+		{"Pusher", Type, 8, ""},
+		{"ReadRequest", Func, 0, "func(b *bufio.Reader) (*Request, error)"},
+		{"ReadResponse", Func, 0, "func(r *bufio.Reader, req *Request) (*Response, error)"},
+		{"Redirect", Func, 0, "func(w ResponseWriter, r *Request, url string, code int)"},
+		{"RedirectHandler", Func, 0, "func(url string, code int) Handler"},
+		{"Request", Type, 0, ""},
+		{"Request.Body", Field, 0, ""},
+		{"Request.Cancel", Field, 5, ""},
+		{"Request.Close", Field, 0, ""},
+		{"Request.ContentLength", Field, 0, ""},
+		{"Request.Form", Field, 0, ""},
+		{"Request.GetBody", Field, 8, ""},
+		{"Request.Header", Field, 0, ""},
+		{"Request.Host", Field, 0, ""},
+		{"Request.Method", Field, 0, ""},
+		{"Request.MultipartForm", Field, 0, ""},
+		{"Request.Pattern", Field, 23, ""},
+		{"Request.PostForm", Field, 1, ""},
+		{"Request.Proto", Field, 0, ""},
+		{"Request.ProtoMajor", Field, 0, ""},
+		{"Request.ProtoMinor", Field, 0, ""},
+		{"Request.RemoteAddr", Field, 0, ""},
+		{"Request.RequestURI", Field, 0, ""},
+		{"Request.Response", Field, 7, ""},
+		{"Request.TLS", Field, 0, ""},
+		{"Request.Trailer", Field, 0, ""},
+		{"Request.TransferEncoding", Field, 0, ""},
+		{"Request.URL", Field, 0, ""},
+		{"Response", Type, 0, ""},
+		{"Response.Body", Field, 0, ""},
+		{"Response.Close", Field, 0, ""},
+		{"Response.ContentLength", Field, 0, ""},
+		{"Response.Header", Field, 0, ""},
+		{"Response.Proto", Field, 0, ""},
+		{"Response.ProtoMajor", Field, 0, ""},
+		{"Response.ProtoMinor", Field, 0, ""},
+		{"Response.Request", Field, 0, ""},
+		{"Response.Status", Field, 0, ""},
+		{"Response.StatusCode", Field, 0, ""},
+		{"Response.TLS", Field, 3, ""},
+		{"Response.Trailer", Field, 0, ""},
+		{"Response.TransferEncoding", Field, 0, ""},
+		{"Response.Uncompressed", Field, 7, ""},
+		{"ResponseController", Type, 20, ""},
+		{"ResponseWriter", Type, 0, ""},
+		{"RoundTripper", Type, 0, ""},
+		{"SameSite", Type, 11, ""},
+		{"SameSiteDefaultMode", Const, 11, ""},
+		{"SameSiteLaxMode", Const, 11, ""},
+		{"SameSiteNoneMode", Const, 13, ""},
+		{"SameSiteStrictMode", Const, 11, ""},
+		{"Serve", Func, 0, "func(l net.Listener, handler Handler) error"},
+		{"ServeContent", Func, 0, "func(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker)"},
+		{"ServeFile", Func, 0, "func(w ResponseWriter, r *Request, name string)"},
+		{"ServeFileFS", Func, 22, "func(w ResponseWriter, r *Request, fsys fs.FS, name string)"},
+		{"ServeMux", Type, 0, ""},
+		{"ServeTLS", Func, 9, "func(l net.Listener, handler Handler, certFile string, keyFile string) error"},
+		{"Server", Type, 0, ""},
+		{"Server.Addr", Field, 0, ""},
+		{"Server.BaseContext", Field, 13, ""},
+		{"Server.ConnContext", Field, 13, ""},
+		{"Server.ConnState", Field, 3, ""},
+		{"Server.DisableGeneralOptionsHandler", Field, 20, ""},
+		{"Server.ErrorLog", Field, 3, ""},
+		{"Server.HTTP2", Field, 24, ""},
+		{"Server.Handler", Field, 0, ""},
+		{"Server.IdleTimeout", Field, 8, ""},
+		{"Server.MaxHeaderBytes", Field, 0, ""},
+		{"Server.Protocols", Field, 24, ""},
+		{"Server.ReadHeaderTimeout", Field, 8, ""},
+		{"Server.ReadTimeout", Field, 0, ""},
+		{"Server.TLSConfig", Field, 0, ""},
+		{"Server.TLSNextProto", Field, 1, ""},
+		{"Server.WriteTimeout", Field, 0, ""},
+		{"ServerContextKey", Var, 7, ""},
+		{"SetCookie", Func, 0, "func(w ResponseWriter, cookie *Cookie)"},
+		{"StateActive", Const, 3, ""},
+		{"StateClosed", Const, 3, ""},
+		{"StateHijacked", Const, 3, ""},
+		{"StateIdle", Const, 3, ""},
+		{"StateNew", Const, 3, ""},
+		{"StatusAccepted", Const, 0, ""},
+		{"StatusAlreadyReported", Const, 7, ""},
+		{"StatusBadGateway", Const, 0, ""},
+		{"StatusBadRequest", Const, 0, ""},
+		{"StatusConflict", Const, 0, ""},
+		{"StatusContinue", Const, 0, ""},
+		{"StatusCreated", Const, 0, ""},
+		{"StatusEarlyHints", Const, 13, ""},
+		{"StatusExpectationFailed", Const, 0, ""},
+		{"StatusFailedDependency", Const, 7, ""},
+		{"StatusForbidden", Const, 0, ""},
+		{"StatusFound", Const, 0, ""},
+		{"StatusGatewayTimeout", Const, 0, ""},
+		{"StatusGone", Const, 0, ""},
+		{"StatusHTTPVersionNotSupported", Const, 0, ""},
+		{"StatusIMUsed", Const, 7, ""},
+		{"StatusInsufficientStorage", Const, 7, ""},
+		{"StatusInternalServerError", Const, 0, ""},
+		{"StatusLengthRequired", Const, 0, ""},
+		{"StatusLocked", Const, 7, ""},
+		{"StatusLoopDetected", Const, 7, ""},
+		{"StatusMethodNotAllowed", Const, 0, ""},
+		{"StatusMisdirectedRequest", Const, 11, ""},
+		{"StatusMovedPermanently", Const, 0, ""},
+		{"StatusMultiStatus", Const, 7, ""},
+		{"StatusMultipleChoices", Const, 0, ""},
+		{"StatusNetworkAuthenticationRequired", Const, 6, ""},
+		{"StatusNoContent", Const, 0, ""},
+		{"StatusNonAuthoritativeInfo", Const, 0, ""},
+		{"StatusNotAcceptable", Const, 0, ""},
+		{"StatusNotExtended", Const, 7, ""},
+		{"StatusNotFound", Const, 0, ""},
+		{"StatusNotImplemented", Const, 0, ""},
+		{"StatusNotModified", Const, 0, ""},
+		{"StatusOK", Const, 0, ""},
+		{"StatusPartialContent", Const, 0, ""},
+		{"StatusPaymentRequired", Const, 0, ""},
+		{"StatusPermanentRedirect", Const, 7, ""},
+		{"StatusPreconditionFailed", Const, 0, ""},
+		{"StatusPreconditionRequired", Const, 6, ""},
+		{"StatusProcessing", Const, 7, ""},
+		{"StatusProxyAuthRequired", Const, 0, ""},
+		{"StatusRequestEntityTooLarge", Const, 0, ""},
+		{"StatusRequestHeaderFieldsTooLarge", Const, 6, ""},
+		{"StatusRequestTimeout", Const, 0, ""},
+		{"StatusRequestURITooLong", Const, 0, ""},
+		{"StatusRequestedRangeNotSatisfiable", Const, 0, ""},
+		{"StatusResetContent", Const, 0, ""},
+		{"StatusSeeOther", Const, 0, ""},
+		{"StatusServiceUnavailable", Const, 0, ""},
+		{"StatusSwitchingProtocols", Const, 0, ""},
+		{"StatusTeapot", Const, 0, ""},
+		{"StatusTemporaryRedirect", Const, 0, ""},
+		{"StatusText", Func, 0, "func(code int) string"},
+		{"StatusTooEarly", Const, 12, ""},
+		{"StatusTooManyRequests", Const, 6, ""},
+		{"StatusUnauthorized", Const, 0, ""},
+		{"StatusUnavailableForLegalReasons", Const, 6, ""},
+		{"StatusUnprocessableEntity", Const, 7, ""},
+		{"StatusUnsupportedMediaType", Const, 0, ""},
+		{"StatusUpgradeRequired", Const, 7, ""},
+		{"StatusUseProxy", Const, 0, ""},
+		{"StatusVariantAlsoNegotiates", Const, 7, ""},
+		{"StripPrefix", Func, 0, "func(prefix string, h Handler) Handler"},
+		{"TimeFormat", Const, 0, ""},
+		{"TimeoutHandler", Func, 0, "func(h Handler, dt time.Duration, msg string) Handler"},
+		{"TrailerPrefix", Const, 8, ""},
+		{"Transport", Type, 0, ""},
+		{"Transport.Dial", Field, 0, ""},
+		{"Transport.DialContext", Field, 7, ""},
+		{"Transport.DialTLS", Field, 4, ""},
+		{"Transport.DialTLSContext", Field, 14, ""},
+		{"Transport.DisableCompression", Field, 0, ""},
+		{"Transport.DisableKeepAlives", Field, 0, ""},
+		{"Transport.ExpectContinueTimeout", Field, 6, ""},
+		{"Transport.ForceAttemptHTTP2", Field, 13, ""},
+		{"Transport.GetProxyConnectHeader", Field, 16, ""},
+		{"Transport.HTTP2", Field, 24, ""},
+		{"Transport.IdleConnTimeout", Field, 7, ""},
+		{"Transport.MaxConnsPerHost", Field, 11, ""},
+		{"Transport.MaxIdleConns", Field, 7, ""},
+		{"Transport.MaxIdleConnsPerHost", Field, 0, ""},
+		{"Transport.MaxResponseHeaderBytes", Field, 7, ""},
+		{"Transport.OnProxyConnectResponse", Field, 20, ""},
+		{"Transport.Protocols", Field, 24, ""},
+		{"Transport.Proxy", Field, 0, ""},
+		{"Transport.ProxyConnectHeader", Field, 8, ""},
+		{"Transport.ReadBufferSize", Field, 13, ""},
+		{"Transport.ResponseHeaderTimeout", Field, 1, ""},
+		{"Transport.TLSClientConfig", Field, 0, ""},
+		{"Transport.TLSHandshakeTimeout", Field, 3, ""},
+		{"Transport.TLSNextProto", Field, 6, ""},
+		{"Transport.WriteBufferSize", Field, 13, ""},
+	},
+	"net/http/cgi": {
+		{"(*Handler).ServeHTTP", Method, 0, ""},
+		{"Handler", Type, 0, ""},
+		{"Handler.Args", Field, 0, ""},
+		{"Handler.Dir", Field, 0, ""},
+		{"Handler.Env", Field, 0, ""},
+		{"Handler.InheritEnv", Field, 0, ""},
+		{"Handler.Logger", Field, 0, ""},
+		{"Handler.Path", Field, 0, ""},
+		{"Handler.PathLocationHandler", Field, 0, ""},
+		{"Handler.Root", Field, 0, ""},
+		{"Handler.Stderr", Field, 7, ""},
+		{"Request", Func, 0, "func() (*http.Request, error)"},
+		{"RequestFromMap", Func, 0, "func(params map[string]string) (*http.Request, error)"},
+		{"Serve", Func, 0, "func(handler http.Handler) error"},
+	},
+	"net/http/cookiejar": {
+		{"(*Jar).Cookies", Method, 1, ""},
+		{"(*Jar).SetCookies", Method, 1, ""},
+		{"Jar", Type, 1, ""},
+		{"New", Func, 1, "func(o *Options) (*Jar, error)"},
+		{"Options", Type, 1, ""},
+		{"Options.PublicSuffixList", Field, 1, ""},
+		{"PublicSuffixList", Type, 1, ""},
+	},
+	"net/http/fcgi": {
+		{"ErrConnClosed", Var, 5, ""},
+		{"ErrRequestAborted", Var, 5, ""},
+		{"ProcessEnv", Func, 9, "func(r *http.Request) map[string]string"},
+		{"Serve", Func, 0, "func(l net.Listener, handler http.Handler) error"},
+	},
+	"net/http/httptest": {
+		{"(*ResponseRecorder).Flush", Method, 0, ""},
+		{"(*ResponseRecorder).Header", Method, 0, ""},
+		{"(*ResponseRecorder).Result", Method, 7, ""},
+		{"(*ResponseRecorder).Write", Method, 0, ""},
+		{"(*ResponseRecorder).WriteHeader", Method, 0, ""},
+		{"(*ResponseRecorder).WriteString", Method, 6, ""},
+		{"(*Server).Certificate", Method, 9, ""},
+		{"(*Server).Client", Method, 9, ""},
+		{"(*Server).Close", Method, 0, ""},
+		{"(*Server).CloseClientConnections", Method, 0, ""},
+		{"(*Server).Start", Method, 0, ""},
+		{"(*Server).StartTLS", Method, 0, ""},
+		{"DefaultRemoteAddr", Const, 0, ""},
+		{"NewRecorder", Func, 0, "func() *ResponseRecorder"},
+		{"NewRequest", Func, 7, "func(method string, target string, body io.Reader) *http.Request"},
+		{"NewRequestWithContext", Func, 23, "func(ctx context.Context, method string, target string, body io.Reader) *http.Request"},
+		{"NewServer", Func, 0, "func(handler http.Handler) *Server"},
+		{"NewTLSServer", Func, 0, "func(handler http.Handler) *Server"},
+		{"NewUnstartedServer", Func, 0, "func(handler http.Handler) *Server"},
+		{"ResponseRecorder", Type, 0, ""},
+		{"ResponseRecorder.Body", Field, 0, ""},
+		{"ResponseRecorder.Code", Field, 0, ""},
+		{"ResponseRecorder.Flushed", Field, 0, ""},
+		{"ResponseRecorder.HeaderMap", Field, 0, ""},
+		{"Server", Type, 0, ""},
+		{"Server.Config", Field, 0, ""},
+		{"Server.EnableHTTP2", Field, 14, ""},
+		{"Server.Listener", Field, 0, ""},
+		{"Server.TLS", Field, 0, ""},
+		{"Server.URL", Field, 0, ""},
+	},
+	"net/http/httptrace": {
+		{"ClientTrace", Type, 7, ""},
+		{"ClientTrace.ConnectDone", Field, 7, ""},
+		{"ClientTrace.ConnectStart", Field, 7, ""},
+		{"ClientTrace.DNSDone", Field, 7, ""},
+		{"ClientTrace.DNSStart", Field, 7, ""},
+		{"ClientTrace.GetConn", Field, 7, ""},
+		{"ClientTrace.Got100Continue", Field, 7, ""},
+		{"ClientTrace.Got1xxResponse", Field, 11, ""},
+		{"ClientTrace.GotConn", Field, 7, ""},
+		{"ClientTrace.GotFirstResponseByte", Field, 7, ""},
+		{"ClientTrace.PutIdleConn", Field, 7, ""},
+		{"ClientTrace.TLSHandshakeDone", Field, 8, ""},
+		{"ClientTrace.TLSHandshakeStart", Field, 8, ""},
+		{"ClientTrace.Wait100Continue", Field, 7, ""},
+		{"ClientTrace.WroteHeaderField", Field, 11, ""},
+		{"ClientTrace.WroteHeaders", Field, 7, ""},
+		{"ClientTrace.WroteRequest", Field, 7, ""},
+		{"ContextClientTrace", Func, 7, "func(ctx context.Context) *ClientTrace"},
+		{"DNSDoneInfo", Type, 7, ""},
+		{"DNSDoneInfo.Addrs", Field, 7, ""},
+		{"DNSDoneInfo.Coalesced", Field, 7, ""},
+		{"DNSDoneInfo.Err", Field, 7, ""},
+		{"DNSStartInfo", Type, 7, ""},
+		{"DNSStartInfo.Host", Field, 7, ""},
+		{"GotConnInfo", Type, 7, ""},
+		{"GotConnInfo.Conn", Field, 7, ""},
+		{"GotConnInfo.IdleTime", Field, 7, ""},
+		{"GotConnInfo.Reused", Field, 7, ""},
+		{"GotConnInfo.WasIdle", Field, 7, ""},
+		{"WithClientTrace", Func, 7, "func(ctx context.Context, trace *ClientTrace) context.Context"},
+		{"WroteRequestInfo", Type, 7, ""},
+		{"WroteRequestInfo.Err", Field, 7, ""},
+	},
+	"net/http/httputil": {
+		{"(*ClientConn).Close", Method, 0, ""},
+		{"(*ClientConn).Do", Method, 0, ""},
+		{"(*ClientConn).Hijack", Method, 0, ""},
+		{"(*ClientConn).Pending", Method, 0, ""},
+		{"(*ClientConn).Read", Method, 0, ""},
+		{"(*ClientConn).Write", Method, 0, ""},
+		{"(*ProxyRequest).SetURL", Method, 20, ""},
+		{"(*ProxyRequest).SetXForwarded", Method, 20, ""},
+		{"(*ReverseProxy).ServeHTTP", Method, 0, ""},
+		{"(*ServerConn).Close", Method, 0, ""},
+		{"(*ServerConn).Hijack", Method, 0, ""},
+		{"(*ServerConn).Pending", Method, 0, ""},
+		{"(*ServerConn).Read", Method, 0, ""},
+		{"(*ServerConn).Write", Method, 0, ""},
+		{"BufferPool", Type, 6, ""},
+		{"ClientConn", Type, 0, ""},
+		{"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
+		{"DumpRequestOut", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
+		{"DumpResponse", Func, 0, "func(resp *http.Response, body bool) ([]byte, error)"},
+		{"ErrClosed", Var, 0, ""},
+		{"ErrLineTooLong", Var, 0, ""},
+		{"ErrPersistEOF", Var, 0, ""},
+		{"ErrPipeline", Var, 0, ""},
+		{"NewChunkedReader", Func, 0, "func(r io.Reader) io.Reader"},
+		{"NewChunkedWriter", Func, 0, "func(w io.Writer) io.WriteCloser"},
+		{"NewClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
+		{"NewProxyClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
+		{"NewServerConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ServerConn"},
+		{"NewSingleHostReverseProxy", Func, 0, "func(target *url.URL) *ReverseProxy"},
+		{"ProxyRequest", Type, 20, ""},
+		{"ProxyRequest.In", Field, 20, ""},
+		{"ProxyRequest.Out", Field, 20, ""},
+		{"ReverseProxy", Type, 0, ""},
+		{"ReverseProxy.BufferPool", Field, 6, ""},
+		{"ReverseProxy.Director", Field, 0, ""},
+		{"ReverseProxy.ErrorHandler", Field, 11, ""},
+		{"ReverseProxy.ErrorLog", Field, 4, ""},
+		{"ReverseProxy.FlushInterval", Field, 0, ""},
+		{"ReverseProxy.ModifyResponse", Field, 8, ""},
+		{"ReverseProxy.Rewrite", Field, 20, ""},
+		{"ReverseProxy.Transport", Field, 0, ""},
+		{"ServerConn", Type, 0, ""},
+	},
+	"net/http/pprof": {
+		{"Cmdline", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+		{"Handler", Func, 0, "func(name string) http.Handler"},
+		{"Index", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+		{"Profile", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+		{"Symbol", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+		{"Trace", Func, 5, "func(w http.ResponseWriter, r *http.Request)"},
+	},
+	"net/mail": {
+		{"(*Address).String", Method, 0, ""},
+		{"(*AddressParser).Parse", Method, 5, ""},
+		{"(*AddressParser).ParseList", Method, 5, ""},
+		{"(Header).AddressList", Method, 0, ""},
+		{"(Header).Date", Method, 0, ""},
+		{"(Header).Get", Method, 0, ""},
+		{"Address", Type, 0, ""},
+		{"Address.Address", Field, 0, ""},
+		{"Address.Name", Field, 0, ""},
+		{"AddressParser", Type, 5, ""},
+		{"AddressParser.WordDecoder", Field, 5, ""},
+		{"ErrHeaderNotPresent", Var, 0, ""},
+		{"Header", Type, 0, ""},
+		{"Message", Type, 0, ""},
+		{"Message.Body", Field, 0, ""},
+		{"Message.Header", Field, 0, ""},
+		{"ParseAddress", Func, 1, "func(address string) (*Address, error)"},
+		{"ParseAddressList", Func, 1, "func(list string) ([]*Address, error)"},
+		{"ParseDate", Func, 8, "func(date string) (time.Time, error)"},
+		{"ReadMessage", Func, 0, "func(r io.Reader) (msg *Message, err error)"},
+	},
+	"net/netip": {
+		{"(*Addr).UnmarshalBinary", Method, 18, ""},
+		{"(*Addr).UnmarshalText", Method, 18, ""},
+		{"(*AddrPort).UnmarshalBinary", Method, 18, ""},
+		{"(*AddrPort).UnmarshalText", Method, 18, ""},
+		{"(*Prefix).UnmarshalBinary", Method, 18, ""},
+		{"(*Prefix).UnmarshalText", Method, 18, ""},
+		{"(Addr).AppendBinary", Method, 24, ""},
+		{"(Addr).AppendText", Method, 24, ""},
+		{"(Addr).AppendTo", Method, 18, ""},
+		{"(Addr).As16", Method, 18, ""},
+		{"(Addr).As4", Method, 18, ""},
+		{"(Addr).AsSlice", Method, 18, ""},
+		{"(Addr).BitLen", Method, 18, ""},
+		{"(Addr).Compare", Method, 18, ""},
+		{"(Addr).Is4", Method, 18, ""},
+		{"(Addr).Is4In6", Method, 18, ""},
+		{"(Addr).Is6", Method, 18, ""},
+		{"(Addr).IsGlobalUnicast", Method, 18, ""},
+		{"(Addr).IsInterfaceLocalMulticast", Method, 18, ""},
+		{"(Addr).IsLinkLocalMulticast", Method, 18, ""},
+		{"(Addr).IsLinkLocalUnicast", Method, 18, ""},
+		{"(Addr).IsLoopback", Method, 18, ""},
+		{"(Addr).IsMulticast", Method, 18, ""},
+		{"(Addr).IsPrivate", Method, 18, ""},
+		{"(Addr).IsUnspecified", Method, 18, ""},
+		{"(Addr).IsValid", Method, 18, ""},
+		{"(Addr).Less", Method, 18, ""},
+		{"(Addr).MarshalBinary", Method, 18, ""},
+		{"(Addr).MarshalText", Method, 18, ""},
+		{"(Addr).Next", Method, 18, ""},
+		{"(Addr).Prefix", Method, 18, ""},
+		{"(Addr).Prev", Method, 18, ""},
+		{"(Addr).String", Method, 18, ""},
+		{"(Addr).StringExpanded", Method, 18, ""},
+		{"(Addr).Unmap", Method, 18, ""},
+		{"(Addr).WithZone", Method, 18, ""},
+		{"(Addr).Zone", Method, 18, ""},
+		{"(AddrPort).Addr", Method, 18, ""},
+		{"(AddrPort).AppendBinary", Method, 24, ""},
+		{"(AddrPort).AppendText", Method, 24, ""},
+		{"(AddrPort).AppendTo", Method, 18, ""},
+		{"(AddrPort).Compare", Method, 22, ""},
+		{"(AddrPort).IsValid", Method, 18, ""},
+		{"(AddrPort).MarshalBinary", Method, 18, ""},
+		{"(AddrPort).MarshalText", Method, 18, ""},
+		{"(AddrPort).Port", Method, 18, ""},
+		{"(AddrPort).String", Method, 18, ""},
+		{"(Prefix).Addr", Method, 18, ""},
+		{"(Prefix).AppendBinary", Method, 24, ""},
+		{"(Prefix).AppendText", Method, 24, ""},
+		{"(Prefix).AppendTo", Method, 18, ""},
+		{"(Prefix).Bits", Method, 18, ""},
+		{"(Prefix).Compare", Method, 26, ""},
+		{"(Prefix).Contains", Method, 18, ""},
+		{"(Prefix).IsSingleIP", Method, 18, ""},
+		{"(Prefix).IsValid", Method, 18, ""},
+		{"(Prefix).MarshalBinary", Method, 18, ""},
+		{"(Prefix).MarshalText", Method, 18, ""},
+		{"(Prefix).Masked", Method, 18, ""},
+		{"(Prefix).Overlaps", Method, 18, ""},
+		{"(Prefix).String", Method, 18, ""},
+		{"Addr", Type, 18, ""},
+		{"AddrFrom16", Func, 18, "func(addr [16]byte) Addr"},
+		{"AddrFrom4", Func, 18, "func(addr [4]byte) Addr"},
+		{"AddrFromSlice", Func, 18, "func(slice []byte) (ip Addr, ok bool)"},
+		{"AddrPort", Type, 18, ""},
+		{"AddrPortFrom", Func, 18, "func(ip Addr, port uint16) AddrPort"},
+		{"IPv4Unspecified", Func, 18, "func() Addr"},
+		{"IPv6LinkLocalAllNodes", Func, 18, "func() Addr"},
+		{"IPv6LinkLocalAllRouters", Func, 20, "func() Addr"},
+		{"IPv6Loopback", Func, 20, "func() Addr"},
+		{"IPv6Unspecified", Func, 18, "func() Addr"},
+		{"MustParseAddr", Func, 18, "func(s string) Addr"},
+		{"MustParseAddrPort", Func, 18, "func(s string) AddrPort"},
+		{"MustParsePrefix", Func, 18, "func(s string) Prefix"},
+		{"ParseAddr", Func, 18, "func(s string) (Addr, error)"},
+		{"ParseAddrPort", Func, 18, "func(s string) (AddrPort, error)"},
+		{"ParsePrefix", Func, 18, "func(s string) (Prefix, error)"},
+		{"Prefix", Type, 18, ""},
+		{"PrefixFrom", Func, 18, "func(ip Addr, bits int) Prefix"},
+	},
+	"net/rpc": {
+		{"(*Client).Call", Method, 0, ""},
+		{"(*Client).Close", Method, 0, ""},
+		{"(*Client).Go", Method, 0, ""},
+		{"(*Server).Accept", Method, 0, ""},
+		{"(*Server).HandleHTTP", Method, 0, ""},
+		{"(*Server).Register", Method, 0, ""},
+		{"(*Server).RegisterName", Method, 0, ""},
+		{"(*Server).ServeCodec", Method, 0, ""},
+		{"(*Server).ServeConn", Method, 0, ""},
+		{"(*Server).ServeHTTP", Method, 0, ""},
+		{"(*Server).ServeRequest", Method, 0, ""},
+		{"(ServerError).Error", Method, 0, ""},
+		{"Accept", Func, 0, "func(lis net.Listener)"},
+		{"Call", Type, 0, ""},
+		{"Call.Args", Field, 0, ""},
+		{"Call.Done", Field, 0, ""},
+		{"Call.Error", Field, 0, ""},
+		{"Call.Reply", Field, 0, ""},
+		{"Call.ServiceMethod", Field, 0, ""},
+		{"Client", Type, 0, ""},
+		{"ClientCodec", Type, 0, ""},
+		{"DefaultDebugPath", Const, 0, ""},
+		{"DefaultRPCPath", Const, 0, ""},
+		{"DefaultServer", Var, 0, ""},
+		{"Dial", Func, 0, "func(network string, address string) (*Client, error)"},
+		{"DialHTTP", Func, 0, "func(network string, address string) (*Client, error)"},
+		{"DialHTTPPath", Func, 0, "func(network string, address string, path string) (*Client, error)"},
+		{"ErrShutdown", Var, 0, ""},
+		{"HandleHTTP", Func, 0, "func()"},
+		{"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *Client"},
+		{"NewClientWithCodec", Func, 0, "func(codec ClientCodec) *Client"},
+		{"NewServer", Func, 0, "func() *Server"},
+		{"Register", Func, 0, "func(rcvr any) error"},
+		{"RegisterName", Func, 0, "func(name string, rcvr any) error"},
+		{"Request", Type, 0, ""},
+		{"Request.Seq", Field, 0, ""},
+		{"Request.ServiceMethod", Field, 0, ""},
+		{"Response", Type, 0, ""},
+		{"Response.Error", Field, 0, ""},
+		{"Response.Seq", Field, 0, ""},
+		{"Response.ServiceMethod", Field, 0, ""},
+		{"ServeCodec", Func, 0, "func(codec ServerCodec)"},
+		{"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
+		{"ServeRequest", Func, 0, "func(codec ServerCodec) error"},
+		{"Server", Type, 0, ""},
+		{"ServerCodec", Type, 0, ""},
+		{"ServerError", Type, 0, ""},
+	},
+	"net/rpc/jsonrpc": {
+		{"Dial", Func, 0, "func(network string, address string) (*rpc.Client, error)"},
+		{"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *rpc.Client"},
+		{"NewClientCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ClientCodec"},
+		{"NewServerCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ServerCodec"},
+		{"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
+	},
+	"net/smtp": {
+		{"(*Client).Auth", Method, 0, ""},
+		{"(*Client).Close", Method, 2, ""},
+		{"(*Client).Data", Method, 0, ""},
+		{"(*Client).Extension", Method, 0, ""},
+		{"(*Client).Hello", Method, 1, ""},
+		{"(*Client).Mail", Method, 0, ""},
+		{"(*Client).Noop", Method, 10, ""},
+		{"(*Client).Quit", Method, 0, ""},
+		{"(*Client).Rcpt", Method, 0, ""},
+		{"(*Client).Reset", Method, 0, ""},
+		{"(*Client).StartTLS", Method, 0, ""},
+		{"(*Client).TLSConnectionState", Method, 5, ""},
+		{"(*Client).Verify", Method, 0, ""},
+		{"Auth", Type, 0, ""},
+		{"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"},
+		{"Client", Type, 0, ""},
+		{"Client.Text", Field, 0, ""},
+		{"Dial", Func, 0, "func(addr string) (*Client, error)"},
+		{"NewClient", Func, 0, "func(conn net.Conn, host string) (*Client, error)"},
+		{"PlainAuth", Func, 0, "func(identity string, username string, password string, host string) Auth"},
+		{"SendMail", Func, 0, "func(addr string, a Auth, from string, to []string, msg []byte) error"},
+		{"ServerInfo", Type, 0, ""},
+		{"ServerInfo.Auth", Field, 0, ""},
+		{"ServerInfo.Name", Field, 0, ""},
+		{"ServerInfo.TLS", Field, 0, ""},
+	},
+	"net/textproto": {
+		{"(*Conn).Close", Method, 0, ""},
+		{"(*Conn).Cmd", Method, 0, ""},
+		{"(*Conn).DotReader", Method, 0, ""},
+		{"(*Conn).DotWriter", Method, 0, ""},
+		{"(*Conn).EndRequest", Method, 0, ""},
+		{"(*Conn).EndResponse", Method, 0, ""},
+		{"(*Conn).Next", Method, 0, ""},
+		{"(*Conn).PrintfLine", Method, 0, ""},
+		{"(*Conn).ReadCodeLine", Method, 0, ""},
+		{"(*Conn).ReadContinuedLine", Method, 0, ""},
+		{"(*Conn).ReadContinuedLineBytes", Method, 0, ""},
+		{"(*Conn).ReadDotBytes", Method, 0, ""},
+		{"(*Conn).ReadDotLines", Method, 0, ""},
+		{"(*Conn).ReadLine", Method, 0, ""},
+		{"(*Conn).ReadLineBytes", Method, 0, ""},
+		{"(*Conn).ReadMIMEHeader", Method, 0, ""},
+		{"(*Conn).ReadResponse", Method, 0, ""},
+		{"(*Conn).StartRequest", Method, 0, ""},
+		{"(*Conn).StartResponse", Method, 0, ""},
+		{"(*Error).Error", Method, 0, ""},
+		{"(*Pipeline).EndRequest", Method, 0, ""},
+		{"(*Pipeline).EndResponse", Method, 0, ""},
+		{"(*Pipeline).Next", Method, 0, ""},
+		{"(*Pipeline).StartRequest", Method, 0, ""},
+		{"(*Pipeline).StartResponse", Method, 0, ""},
+		{"(*Reader).DotReader", Method, 0, ""},
+		{"(*Reader).ReadCodeLine", Method, 0, ""},
+		{"(*Reader).ReadContinuedLine", Method, 0, ""},
+		{"(*Reader).ReadContinuedLineBytes", Method, 0, ""},
+		{"(*Reader).ReadDotBytes", Method, 0, ""},
+		{"(*Reader).ReadDotLines", Method, 0, ""},
+		{"(*Reader).ReadLine", Method, 0, ""},
+		{"(*Reader).ReadLineBytes", Method, 0, ""},
+		{"(*Reader).ReadMIMEHeader", Method, 0, ""},
+		{"(*Reader).ReadResponse", Method, 0, ""},
+		{"(*Writer).DotWriter", Method, 0, ""},
+		{"(*Writer).PrintfLine", Method, 0, ""},
+		{"(MIMEHeader).Add", Method, 0, ""},
+		{"(MIMEHeader).Del", Method, 0, ""},
+		{"(MIMEHeader).Get", Method, 0, ""},
+		{"(MIMEHeader).Set", Method, 0, ""},
+		{"(MIMEHeader).Values", Method, 14, ""},
+		{"(ProtocolError).Error", Method, 0, ""},
+		{"CanonicalMIMEHeaderKey", Func, 0, "func(s string) string"},
+		{"Conn", Type, 0, ""},
+		{"Conn.Pipeline", Field, 0, ""},
+		{"Conn.Reader", Field, 0, ""},
+		{"Conn.Writer", Field, 0, ""},
+		{"Dial", Func, 0, "func(network string, addr string) (*Conn, error)"},
+		{"Error", Type, 0, ""},
+		{"Error.Code", Field, 0, ""},
+		{"Error.Msg", Field, 0, ""},
+		{"MIMEHeader", Type, 0, ""},
+		{"NewConn", Func, 0, "func(conn io.ReadWriteCloser) *Conn"},
+		{"NewReader", Func, 0, "func(r *bufio.Reader) *Reader"},
+		{"NewWriter", Func, 0, "func(w *bufio.Writer) *Writer"},
+		{"Pipeline", Type, 0, ""},
+		{"ProtocolError", Type, 0, ""},
+		{"Reader", Type, 0, ""},
+		{"Reader.R", Field, 0, ""},
+		{"TrimBytes", Func, 1, "func(b []byte) []byte"},
+		{"TrimString", Func, 1, "func(s string) string"},
+		{"Writer", Type, 0, ""},
+		{"Writer.W", Field, 0, ""},
+	},
+	"net/url": {
+		{"(*Error).Error", Method, 0, ""},
+		{"(*Error).Temporary", Method, 6, ""},
+		{"(*Error).Timeout", Method, 6, ""},
+		{"(*Error).Unwrap", Method, 13, ""},
+		{"(*URL).AppendBinary", Method, 24, ""},
+		{"(*URL).EscapedFragment", Method, 15, ""},
+		{"(*URL).EscapedPath", Method, 5, ""},
+		{"(*URL).Hostname", Method, 8, ""},
+		{"(*URL).IsAbs", Method, 0, ""},
+		{"(*URL).JoinPath", Method, 19, ""},
+		{"(*URL).MarshalBinary", Method, 8, ""},
+		{"(*URL).Parse", Method, 0, ""},
+		{"(*URL).Port", Method, 8, ""},
+		{"(*URL).Query", Method, 0, ""},
+		{"(*URL).Redacted", Method, 15, ""},
+		{"(*URL).RequestURI", Method, 0, ""},
+		{"(*URL).ResolveReference", Method, 0, ""},
+		{"(*URL).String", Method, 0, ""},
+		{"(*URL).UnmarshalBinary", Method, 8, ""},
+		{"(*Userinfo).Password", Method, 0, ""},
+		{"(*Userinfo).String", Method, 0, ""},
+		{"(*Userinfo).Username", Method, 0, ""},
+		{"(EscapeError).Error", Method, 0, ""},
+		{"(InvalidHostError).Error", Method, 6, ""},
+		{"(Values).Add", Method, 0, ""},
+		{"(Values).Del", Method, 0, ""},
+		{"(Values).Encode", Method, 0, ""},
+		{"(Values).Get", Method, 0, ""},
+		{"(Values).Has", Method, 17, ""},
+		{"(Values).Set", Method, 0, ""},
+		{"Error", Type, 0, ""},
+		{"Error.Err", Field, 0, ""},
+		{"Error.Op", Field, 0, ""},
+		{"Error.URL", Field, 0, ""},
+		{"EscapeError", Type, 0, ""},
+		{"InvalidHostError", Type, 6, ""},
+		{"JoinPath", Func, 19, "func(base string, elem ...string) (result string, err error)"},
+		{"Parse", Func, 0, "func(rawURL string) (*URL, error)"},
+		{"ParseQuery", Func, 0, "func(query string) (Values, error)"},
+		{"ParseRequestURI", Func, 0, "func(rawURL string) (*URL, error)"},
+		{"PathEscape", Func, 8, "func(s string) string"},
+		{"PathUnescape", Func, 8, "func(s string) (string, error)"},
+		{"QueryEscape", Func, 0, "func(s string) string"},
+		{"QueryUnescape", Func, 0, "func(s string) (string, error)"},
+		{"URL", Type, 0, ""},
+		{"URL.ForceQuery", Field, 7, ""},
+		{"URL.Fragment", Field, 0, ""},
+		{"URL.Host", Field, 0, ""},
+		{"URL.OmitHost", Field, 19, ""},
+		{"URL.Opaque", Field, 0, ""},
+		{"URL.Path", Field, 0, ""},
+		{"URL.RawFragment", Field, 15, ""},
+		{"URL.RawPath", Field, 5, ""},
+		{"URL.RawQuery", Field, 0, ""},
+		{"URL.Scheme", Field, 0, ""},
+		{"URL.User", Field, 0, ""},
+		{"User", Func, 0, "func(username string) *Userinfo"},
+		{"UserPassword", Func, 0, "func(username string, password string) *Userinfo"},
+		{"Userinfo", Type, 0, ""},
+		{"Values", Type, 0, ""},
+	},
+	"os": {
+		{"(*File).Chdir", Method, 0, ""},
+		{"(*File).Chmod", Method, 0, ""},
+		{"(*File).Chown", Method, 0, ""},
+		{"(*File).Close", Method, 0, ""},
+		{"(*File).Fd", Method, 0, ""},
+		{"(*File).Name", Method, 0, ""},
+		{"(*File).Read", Method, 0, ""},
+		{"(*File).ReadAt", Method, 0, ""},
+		{"(*File).ReadDir", Method, 16, ""},
+		{"(*File).ReadFrom", Method, 15, ""},
+		{"(*File).Readdir", Method, 0, ""},
+		{"(*File).Readdirnames", Method, 0, ""},
+		{"(*File).Seek", Method, 0, ""},
+		{"(*File).SetDeadline", Method, 10, ""},
+		{"(*File).SetReadDeadline", Method, 10, ""},
+		{"(*File).SetWriteDeadline", Method, 10, ""},
+		{"(*File).Stat", Method, 0, ""},
+		{"(*File).Sync", Method, 0, ""},
+		{"(*File).SyscallConn", Method, 12, ""},
+		{"(*File).Truncate", Method, 0, ""},
+		{"(*File).Write", Method, 0, ""},
+		{"(*File).WriteAt", Method, 0, ""},
+		{"(*File).WriteString", Method, 0, ""},
+		{"(*File).WriteTo", Method, 22, ""},
+		{"(*LinkError).Error", Method, 0, ""},
+		{"(*LinkError).Unwrap", Method, 13, ""},
+		{"(*PathError).Error", Method, 0, ""},
+		{"(*PathError).Timeout", Method, 10, ""},
+		{"(*PathError).Unwrap", Method, 13, ""},
+		{"(*Process).Kill", Method, 0, ""},
+		{"(*Process).Release", Method, 0, ""},
+		{"(*Process).Signal", Method, 0, ""},
+		{"(*Process).Wait", Method, 0, ""},
+		{"(*Process).WithHandle", Method, 26, ""},
+		{"(*ProcessState).ExitCode", Method, 12, ""},
+		{"(*ProcessState).Exited", Method, 0, ""},
+		{"(*ProcessState).Pid", Method, 0, ""},
+		{"(*ProcessState).String", Method, 0, ""},
+		{"(*ProcessState).Success", Method, 0, ""},
+		{"(*ProcessState).Sys", Method, 0, ""},
+		{"(*ProcessState).SysUsage", Method, 0, ""},
+		{"(*ProcessState).SystemTime", Method, 0, ""},
+		{"(*ProcessState).UserTime", Method, 0, ""},
+		{"(*Root).Chmod", Method, 25, ""},
+		{"(*Root).Chown", Method, 25, ""},
+		{"(*Root).Chtimes", Method, 25, ""},
+		{"(*Root).Close", Method, 24, ""},
+		{"(*Root).Create", Method, 24, ""},
+		{"(*Root).FS", Method, 24, ""},
+		{"(*Root).Lchown", Method, 25, ""},
+		{"(*Root).Link", Method, 25, ""},
+		{"(*Root).Lstat", Method, 24, ""},
+		{"(*Root).Mkdir", Method, 24, ""},
+		{"(*Root).MkdirAll", Method, 25, ""},
+		{"(*Root).Name", Method, 24, ""},
+		{"(*Root).Open", Method, 24, ""},
+		{"(*Root).OpenFile", Method, 24, ""},
+		{"(*Root).OpenRoot", Method, 24, ""},
+		{"(*Root).ReadFile", Method, 25, ""},
+		{"(*Root).Readlink", Method, 25, ""},
+		{"(*Root).Remove", Method, 24, ""},
+		{"(*Root).RemoveAll", Method, 25, ""},
+		{"(*Root).Rename", Method, 25, ""},
+		{"(*Root).Stat", Method, 24, ""},
+		{"(*Root).Symlink", Method, 25, ""},
+		{"(*Root).WriteFile", Method, 25, ""},
+		{"(*SyscallError).Error", Method, 0, ""},
+		{"(*SyscallError).Timeout", Method, 10, ""},
+		{"(*SyscallError).Unwrap", Method, 13, ""},
+		{"(FileMode).IsDir", Method, 0, ""},
+		{"(FileMode).IsRegular", Method, 1, ""},
+		{"(FileMode).Perm", Method, 0, ""},
+		{"(FileMode).String", Method, 0, ""},
+		{"Args", Var, 0, ""},
+		{"Chdir", Func, 0, "func(dir string) error"},
+		{"Chmod", Func, 0, "func(name string, mode FileMode) error"},
+		{"Chown", Func, 0, "func(name string, uid int, gid int) error"},
+		{"Chtimes", Func, 0, "func(name string, atime time.Time, mtime time.Time) error"},
+		{"Clearenv", Func, 0, "func()"},
+		{"CopyFS", Func, 23, "func(dir string, fsys fs.FS) error"},
+		{"Create", Func, 0, "func(name string) (*File, error)"},
+		{"CreateTemp", Func, 16, "func(dir string, pattern string) (*File, error)"},
+		{"DevNull", Const, 0, ""},
+		{"DirEntry", Type, 16, ""},
+		{"DirFS", Func, 16, "func(dir string) fs.FS"},
+		{"Environ", Func, 0, "func() []string"},
+		{"ErrClosed", Var, 8, ""},
+		{"ErrDeadlineExceeded", Var, 15, ""},
+		{"ErrExist", Var, 0, ""},
+		{"ErrInvalid", Var, 0, ""},
+		{"ErrNoDeadline", Var, 10, ""},
+		{"ErrNoHandle", Var, 26, ""},
+		{"ErrNotExist", Var, 0, ""},
+		{"ErrPermission", Var, 0, ""},
+		{"ErrProcessDone", Var, 16, ""},
+		{"Executable", Func, 8, "func() (string, error)"},
+		{"Exit", Func, 0, "func(code int)"},
+		{"Expand", Func, 0, "func(s string, mapping func(string) string) string"},
+		{"ExpandEnv", Func, 0, "func(s string) string"},
+		{"File", Type, 0, ""},
+		{"FileInfo", Type, 0, ""},
+		{"FileMode", Type, 0, ""},
+		{"FindProcess", Func, 0, "func(pid int) (*Process, error)"},
+		{"Getegid", Func, 0, "func() int"},
+		{"Getenv", Func, 0, "func(key string) string"},
+		{"Geteuid", Func, 0, "func() int"},
+		{"Getgid", Func, 0, "func() int"},
+		{"Getgroups", Func, 0, "func() ([]int, error)"},
+		{"Getpagesize", Func, 0, "func() int"},
+		{"Getpid", Func, 0, "func() int"},
+		{"Getppid", Func, 0, "func() int"},
+		{"Getuid", Func, 0, "func() int"},
+		{"Getwd", Func, 0, "func() (dir string, err error)"},
+		{"Hostname", Func, 0, "func() (name string, err error)"},
+		{"Interrupt", Var, 0, ""},
+		{"IsExist", Func, 0, "func(err error) bool"},
+		{"IsNotExist", Func, 0, "func(err error) bool"},
+		{"IsPathSeparator", Func, 0, "func(c uint8) bool"},
+		{"IsPermission", Func, 0, "func(err error) bool"},
+		{"IsTimeout", Func, 10, "func(err error) bool"},
+		{"Kill", Var, 0, ""},
+		{"Lchown", Func, 0, "func(name string, uid int, gid int) error"},
+		{"Link", Func, 0, "func(oldname string, newname string) error"},
+		{"LinkError", Type, 0, ""},
+		{"LinkError.Err", Field, 0, ""},
+		{"LinkError.New", Field, 0, ""},
+		{"LinkError.Old", Field, 0, ""},
+		{"LinkError.Op", Field, 0, ""},
+		{"LookupEnv", Func, 5, "func(key string) (string, bool)"},
+		{"Lstat", Func, 0, "func(name string) (FileInfo, error)"},
+		{"Mkdir", Func, 0, "func(name string, perm FileMode) error"},
+		{"MkdirAll", Func, 0, "func(path string, perm FileMode) error"},
+		{"MkdirTemp", Func, 16, "func(dir string, pattern string) (string, error)"},
+		{"ModeAppend", Const, 0, ""},
+		{"ModeCharDevice", Const, 0, ""},
+		{"ModeDevice", Const, 0, ""},
+		{"ModeDir", Const, 0, ""},
+		{"ModeExclusive", Const, 0, ""},
+		{"ModeIrregular", Const, 11, ""},
+		{"ModeNamedPipe", Const, 0, ""},
+		{"ModePerm", Const, 0, ""},
+		{"ModeSetgid", Const, 0, ""},
+		{"ModeSetuid", Const, 0, ""},
+		{"ModeSocket", Const, 0, ""},
+		{"ModeSticky", Const, 0, ""},
+		{"ModeSymlink", Const, 0, ""},
+		{"ModeTemporary", Const, 0, ""},
+		{"ModeType", Const, 0, ""},
+		{"NewFile", Func, 0, "func(fd uintptr, name string) *File"},
+		{"NewSyscallError", Func, 0, "func(syscall string, err error) error"},
+		{"O_APPEND", Const, 0, ""},
+		{"O_CREATE", Const, 0, ""},
+		{"O_EXCL", Const, 0, ""},
+		{"O_RDONLY", Const, 0, ""},
+		{"O_RDWR", Const, 0, ""},
+		{"O_SYNC", Const, 0, ""},
+		{"O_TRUNC", Const, 0, ""},
+		{"O_WRONLY", Const, 0, ""},
+		{"Open", Func, 0, "func(name string) (*File, error)"},
+		{"OpenFile", Func, 0, "func(name string, flag int, perm FileMode) (*File, error)"},
+		{"OpenInRoot", Func, 24, "func(dir string, name string) (*File, error)"},
+		{"OpenRoot", Func, 24, "func(name string) (*Root, error)"},
+		{"PathError", Type, 0, ""},
+		{"PathError.Err", Field, 0, ""},
+		{"PathError.Op", Field, 0, ""},
+		{"PathError.Path", Field, 0, ""},
+		{"PathListSeparator", Const, 0, ""},
+		{"PathSeparator", Const, 0, ""},
+		{"Pipe", Func, 0, "func() (r *File, w *File, err error)"},
+		{"ProcAttr", Type, 0, ""},
+		{"ProcAttr.Dir", Field, 0, ""},
+		{"ProcAttr.Env", Field, 0, ""},
+		{"ProcAttr.Files", Field, 0, ""},
+		{"ProcAttr.Sys", Field, 0, ""},
+		{"Process", Type, 0, ""},
+		{"Process.Pid", Field, 0, ""},
+		{"ProcessState", Type, 0, ""},
+		{"ReadDir", Func, 16, "func(name string) ([]DirEntry, error)"},
+		{"ReadFile", Func, 16, "func(name string) ([]byte, error)"},
+		{"Readlink", Func, 0, "func(name string) (string, error)"},
+		{"Remove", Func, 0, "func(name string) error"},
+		{"RemoveAll", Func, 0, "func(path string) error"},
+		{"Rename", Func, 0, "func(oldpath string, newpath string) error"},
+		{"Root", Type, 24, ""},
+		{"SEEK_CUR", Const, 0, ""},
+		{"SEEK_END", Const, 0, ""},
+		{"SEEK_SET", Const, 0, ""},
+		{"SameFile", Func, 0, "func(fi1 FileInfo, fi2 FileInfo) bool"},
+		{"Setenv", Func, 0, "func(key string, value string) error"},
+		{"Signal", Type, 0, ""},
+		{"StartProcess", Func, 0, "func(name string, argv []string, attr *ProcAttr) (*Process, error)"},
+		{"Stat", Func, 0, "func(name string) (FileInfo, error)"},
+		{"Stderr", Var, 0, ""},
+		{"Stdin", Var, 0, ""},
+		{"Stdout", Var, 0, ""},
+		{"Symlink", Func, 0, "func(oldname string, newname string) error"},
+		{"SyscallError", Type, 0, ""},
+		{"SyscallError.Err", Field, 0, ""},
+		{"SyscallError.Syscall", Field, 0, ""},
+		{"TempDir", Func, 0, "func() string"},
+		{"Truncate", Func, 0, "func(name string, size int64) error"},
+		{"Unsetenv", Func, 4, "func(key string) error"},
+		{"UserCacheDir", Func, 11, "func() (string, error)"},
+		{"UserConfigDir", Func, 13, "func() (string, error)"},
+		{"UserHomeDir", Func, 12, "func() (string, error)"},
+		{"WriteFile", Func, 16, "func(name string, data []byte, perm FileMode) error"},
+	},
+	"os/exec": {
+		{"(*Cmd).CombinedOutput", Method, 0, ""},
+		{"(*Cmd).Environ", Method, 19, ""},
+		{"(*Cmd).Output", Method, 0, ""},
+		{"(*Cmd).Run", Method, 0, ""},
+		{"(*Cmd).Start", Method, 0, ""},
+		{"(*Cmd).StderrPipe", Method, 0, ""},
+		{"(*Cmd).StdinPipe", Method, 0, ""},
+		{"(*Cmd).StdoutPipe", Method, 0, ""},
+		{"(*Cmd).String", Method, 13, ""},
+		{"(*Cmd).Wait", Method, 0, ""},
+		{"(*Error).Error", Method, 0, ""},
+		{"(*Error).Unwrap", Method, 13, ""},
+		{"(*ExitError).Error", Method, 0, ""},
+		{"(ExitError).ExitCode", Method, 12, ""},
+		{"(ExitError).Exited", Method, 0, ""},
+		{"(ExitError).Pid", Method, 0, ""},
+		{"(ExitError).String", Method, 0, ""},
+		{"(ExitError).Success", Method, 0, ""},
+		{"(ExitError).Sys", Method, 0, ""},
+		{"(ExitError).SysUsage", Method, 0, ""},
+		{"(ExitError).SystemTime", Method, 0, ""},
+		{"(ExitError).UserTime", Method, 0, ""},
+		{"Cmd", Type, 0, ""},
+		{"Cmd.Args", Field, 0, ""},
+		{"Cmd.Cancel", Field, 20, ""},
+		{"Cmd.Dir", Field, 0, ""},
+		{"Cmd.Env", Field, 0, ""},
+		{"Cmd.Err", Field, 19, ""},
+		{"Cmd.ExtraFiles", Field, 0, ""},
+		{"Cmd.Path", Field, 0, ""},
+		{"Cmd.Process", Field, 0, ""},
+		{"Cmd.ProcessState", Field, 0, ""},
+		{"Cmd.Stderr", Field, 0, ""},
+		{"Cmd.Stdin", Field, 0, ""},
+		{"Cmd.Stdout", Field, 0, ""},
+		{"Cmd.SysProcAttr", Field, 0, ""},
+		{"Cmd.WaitDelay", Field, 20, ""},
+		{"Command", Func, 0, "func(name string, arg ...string) *Cmd"},
+		{"CommandContext", Func, 7, "func(ctx context.Context, name string, arg ...string) *Cmd"},
+		{"ErrDot", Var, 19, ""},
+		{"ErrNotFound", Var, 0, ""},
+		{"ErrWaitDelay", Var, 20, ""},
+		{"Error", Type, 0, ""},
+		{"Error.Err", Field, 0, ""},
+		{"Error.Name", Field, 0, ""},
+		{"ExitError", Type, 0, ""},
+		{"ExitError.ProcessState", Field, 0, ""},
+		{"ExitError.Stderr", Field, 6, ""},
+		{"LookPath", Func, 0, "func(file string) (string, error)"},
+	},
+	"os/signal": {
+		{"Ignore", Func, 5, "func(sig ...os.Signal)"},
+		{"Ignored", Func, 11, "func(sig os.Signal) bool"},
+		{"Notify", Func, 0, "func(c chan<- os.Signal, sig ...os.Signal)"},
+		{"NotifyContext", Func, 16, "func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)"},
+		{"Reset", Func, 5, "func(sig ...os.Signal)"},
+		{"Stop", Func, 1, "func(c chan<- os.Signal)"},
+	},
+	"os/user": {
+		{"(*User).GroupIds", Method, 7, ""},
+		{"(UnknownGroupError).Error", Method, 7, ""},
+		{"(UnknownGroupIdError).Error", Method, 7, ""},
+		{"(UnknownUserError).Error", Method, 0, ""},
+		{"(UnknownUserIdError).Error", Method, 0, ""},
+		{"Current", Func, 0, "func() (*User, error)"},
+		{"Group", Type, 7, ""},
+		{"Group.Gid", Field, 7, ""},
+		{"Group.Name", Field, 7, ""},
+		{"Lookup", Func, 0, "func(username string) (*User, error)"},
+		{"LookupGroup", Func, 7, "func(name string) (*Group, error)"},
+		{"LookupGroupId", Func, 7, "func(gid string) (*Group, error)"},
+		{"LookupId", Func, 0, "func(uid string) (*User, error)"},
+		{"UnknownGroupError", Type, 7, ""},
+		{"UnknownGroupIdError", Type, 7, ""},
+		{"UnknownUserError", Type, 0, ""},
+		{"UnknownUserIdError", Type, 0, ""},
+		{"User", Type, 0, ""},
+		{"User.Gid", Field, 0, ""},
+		{"User.HomeDir", Field, 0, ""},
+		{"User.Name", Field, 0, ""},
+		{"User.Uid", Field, 0, ""},
+		{"User.Username", Field, 0, ""},
+	},
+	"path": {
+		{"Base", Func, 0, "func(path string) string"},
+		{"Clean", Func, 0, "func(path string) string"},
+		{"Dir", Func, 0, "func(path string) string"},
+		{"ErrBadPattern", Var, 0, ""},
+		{"Ext", Func, 0, "func(path string) string"},
+		{"IsAbs", Func, 0, "func(path string) bool"},
+		{"Join", Func, 0, "func(elem ...string) string"},
+		{"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
+		{"Split", Func, 0, "func(path string) (dir string, file string)"},
+	},
+	"path/filepath": {
+		{"Abs", Func, 0, "func(path string) (string, error)"},
+		{"Base", Func, 0, "func(path string) string"},
+		{"Clean", Func, 0, "func(path string) string"},
+		{"Dir", Func, 0, "func(path string) string"},
+		{"ErrBadPattern", Var, 0, ""},
+		{"EvalSymlinks", Func, 0, "func(path string) (string, error)"},
+		{"Ext", Func, 0, "func(path string) string"},
+		{"FromSlash", Func, 0, "func(path string) string"},
+		{"Glob", Func, 0, "func(pattern string) (matches []string, err error)"},
+		{"HasPrefix", Func, 0, "func(p string, prefix string) bool"},
+		{"IsAbs", Func, 0, "func(path string) bool"},
+		{"IsLocal", Func, 20, "func(path string) bool"},
+		{"Join", Func, 0, "func(elem ...string) string"},
+		{"ListSeparator", Const, 0, ""},
+		{"Localize", Func, 23, "func(path string) (string, error)"},
+		{"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
+		{"Rel", Func, 0, "func(basePath string, targPath string) (string, error)"},
+		{"Separator", Const, 0, ""},
+		{"SkipAll", Var, 20, ""},
+		{"SkipDir", Var, 0, ""},
+		{"Split", Func, 0, "func(path string) (dir string, file string)"},
+		{"SplitList", Func, 0, "func(path string) []string"},
+		{"ToSlash", Func, 0, "func(path string) string"},
+		{"VolumeName", Func, 0, "func(path string) string"},
+		{"Walk", Func, 0, "func(root string, fn WalkFunc) error"},
+		{"WalkDir", Func, 16, "func(root string, fn fs.WalkDirFunc) error"},
+		{"WalkFunc", Type, 0, ""},
+	},
+	"plugin": {
+		{"(*Plugin).Lookup", Method, 8, ""},
+		{"Open", Func, 8, "func(path string) (*Plugin, error)"},
+		{"Plugin", Type, 8, ""},
+		{"Symbol", Type, 8, ""},
+	},
+	"reflect": {
+		{"(*MapIter).Key", Method, 12, ""},
+		{"(*MapIter).Next", Method, 12, ""},
+		{"(*MapIter).Reset", Method, 18, ""},
+		{"(*MapIter).Value", Method, 12, ""},
+		{"(*ValueError).Error", Method, 0, ""},
+		{"(ChanDir).String", Method, 0, ""},
+		{"(Kind).String", Method, 0, ""},
+		{"(Method).IsExported", Method, 17, ""},
+		{"(StructField).IsExported", Method, 17, ""},
+		{"(StructTag).Get", Method, 0, ""},
+		{"(StructTag).Lookup", Method, 7, ""},
+		{"(Value).Addr", Method, 0, ""},
+		{"(Value).Bool", Method, 0, ""},
+		{"(Value).Bytes", Method, 0, ""},
+		{"(Value).Call", Method, 0, ""},
+		{"(Value).CallSlice", Method, 0, ""},
+		{"(Value).CanAddr", Method, 0, ""},
+		{"(Value).CanComplex", Method, 18, ""},
+		{"(Value).CanConvert", Method, 17, ""},
+		{"(Value).CanFloat", Method, 18, ""},
+		{"(Value).CanInt", Method, 18, ""},
+		{"(Value).CanInterface", Method, 0, ""},
+		{"(Value).CanSet", Method, 0, ""},
+		{"(Value).CanUint", Method, 18, ""},
+		{"(Value).Cap", Method, 0, ""},
+		{"(Value).Clear", Method, 21, ""},
+		{"(Value).Close", Method, 0, ""},
+		{"(Value).Comparable", Method, 20, ""},
+		{"(Value).Complex", Method, 0, ""},
+		{"(Value).Convert", Method, 1, ""},
+		{"(Value).Elem", Method, 0, ""},
+		{"(Value).Equal", Method, 20, ""},
+		{"(Value).Field", Method, 0, ""},
+		{"(Value).FieldByIndex", Method, 0, ""},
+		{"(Value).FieldByIndexErr", Method, 18, ""},
+		{"(Value).FieldByName", Method, 0, ""},
+		{"(Value).FieldByNameFunc", Method, 0, ""},
+		{"(Value).Float", Method, 0, ""},
+		{"(Value).Grow", Method, 20, ""},
+		{"(Value).Index", Method, 0, ""},
+		{"(Value).Int", Method, 0, ""},
+		{"(Value).Interface", Method, 0, ""},
+		{"(Value).InterfaceData", Method, 0, ""},
+		{"(Value).IsNil", Method, 0, ""},
+		{"(Value).IsValid", Method, 0, ""},
+		{"(Value).IsZero", Method, 13, ""},
+		{"(Value).Kind", Method, 0, ""},
+		{"(Value).Len", Method, 0, ""},
+		{"(Value).MapIndex", Method, 0, ""},
+		{"(Value).MapKeys", Method, 0, ""},
+		{"(Value).MapRange", Method, 12, ""},
+		{"(Value).Method", Method, 0, ""},
+		{"(Value).MethodByName", Method, 0, ""},
+		{"(Value).NumField", Method, 0, ""},
+		{"(Value).NumMethod", Method, 0, ""},
+		{"(Value).OverflowComplex", Method, 0, ""},
+		{"(Value).OverflowFloat", Method, 0, ""},
+		{"(Value).OverflowInt", Method, 0, ""},
+		{"(Value).OverflowUint", Method, 0, ""},
+		{"(Value).Pointer", Method, 0, ""},
+		{"(Value).Recv", Method, 0, ""},
+		{"(Value).Send", Method, 0, ""},
+		{"(Value).Seq", Method, 23, ""},
+		{"(Value).Seq2", Method, 23, ""},
+		{"(Value).Set", Method, 0, ""},
+		{"(Value).SetBool", Method, 0, ""},
+		{"(Value).SetBytes", Method, 0, ""},
+		{"(Value).SetCap", Method, 2, ""},
+		{"(Value).SetComplex", Method, 0, ""},
+		{"(Value).SetFloat", Method, 0, ""},
+		{"(Value).SetInt", Method, 0, ""},
+		{"(Value).SetIterKey", Method, 18, ""},
+		{"(Value).SetIterValue", Method, 18, ""},
+		{"(Value).SetLen", Method, 0, ""},
+		{"(Value).SetMapIndex", Method, 0, ""},
+		{"(Value).SetPointer", Method, 0, ""},
+		{"(Value).SetString", Method, 0, ""},
+		{"(Value).SetUint", Method, 0, ""},
+		{"(Value).SetZero", Method, 20, ""},
+		{"(Value).Slice", Method, 0, ""},
+		{"(Value).Slice3", Method, 2, ""},
+		{"(Value).String", Method, 0, ""},
+		{"(Value).TryRecv", Method, 0, ""},
+		{"(Value).TrySend", Method, 0, ""},
+		{"(Value).Type", Method, 0, ""},
+		{"(Value).Uint", Method, 0, ""},
+		{"(Value).UnsafeAddr", Method, 0, ""},
+		{"(Value).UnsafePointer", Method, 18, ""},
+		{"Append", Func, 0, "func(s Value, x ...Value) Value"},
+		{"AppendSlice", Func, 0, "func(s Value, t Value) Value"},
+		{"Array", Const, 0, ""},
+		{"ArrayOf", Func, 5, "func(length int, elem Type) Type"},
+		{"Bool", Const, 0, ""},
+		{"BothDir", Const, 0, ""},
+		{"Chan", Const, 0, ""},
+		{"ChanDir", Type, 0, ""},
+		{"ChanOf", Func, 1, "func(dir ChanDir, t Type) Type"},
+		{"Complex128", Const, 0, ""},
+		{"Complex64", Const, 0, ""},
+		{"Copy", Func, 0, "func(dst Value, src Value) int"},
+		{"DeepEqual", Func, 0, "func(x any, y any) bool"},
+		{"Float32", Const, 0, ""},
+		{"Float64", Const, 0, ""},
+		{"Func", Const, 0, ""},
+		{"FuncOf", Func, 5, "func(in []Type, out []Type, variadic bool) Type"},
+		{"Indirect", Func, 0, "func(v Value) Value"},
+		{"Int", Const, 0, ""},
+		{"Int16", Const, 0, ""},
+		{"Int32", Const, 0, ""},
+		{"Int64", Const, 0, ""},
+		{"Int8", Const, 0, ""},
+		{"Interface", Const, 0, ""},
+		{"Invalid", Const, 0, ""},
+		{"Kind", Type, 0, ""},
+		{"MakeChan", Func, 0, "func(typ Type, buffer int) Value"},
+		{"MakeFunc", Func, 1, "func(typ Type, fn func(args []Value) (results []Value)) Value"},
+		{"MakeMap", Func, 0, "func(typ Type) Value"},
+		{"MakeMapWithSize", Func, 9, "func(typ Type, n int) Value"},
+		{"MakeSlice", Func, 0, "func(typ Type, len int, cap int) Value"},
+		{"Map", Const, 0, ""},
+		{"MapIter", Type, 12, ""},
+		{"MapOf", Func, 1, "func(key Type, elem Type) Type"},
+		{"Method", Type, 0, ""},
+		{"Method.Func", Field, 0, ""},
+		{"Method.Index", Field, 0, ""},
+		{"Method.Name", Field, 0, ""},
+		{"Method.PkgPath", Field, 0, ""},
+		{"Method.Type", Field, 0, ""},
+		{"New", Func, 0, "func(typ Type) Value"},
+		{"NewAt", Func, 0, "func(typ Type, p unsafe.Pointer) Value"},
+		{"Pointer", Const, 18, ""},
+		{"PointerTo", Func, 18, "func(t Type) Type"},
+		{"Ptr", Const, 0, ""},
+		{"PtrTo", Func, 0, "func(t Type) Type"},
+		{"RecvDir", Const, 0, ""},
+		{"Select", Func, 1, "func(cases []SelectCase) (chosen int, recv Value, recvOK bool)"},
+		{"SelectCase", Type, 1, ""},
+		{"SelectCase.Chan", Field, 1, ""},
+		{"SelectCase.Dir", Field, 1, ""},
+		{"SelectCase.Send", Field, 1, ""},
+		{"SelectDefault", Const, 1, ""},
+		{"SelectDir", Type, 1, ""},
+		{"SelectRecv", Const, 1, ""},
+		{"SelectSend", Const, 1, ""},
+		{"SendDir", Const, 0, ""},
+		{"Slice", Const, 0, ""},
+		{"SliceAt", Func, 23, "func(typ Type, p unsafe.Pointer, n int) Value"},
+		{"SliceHeader", Type, 0, ""},
+		{"SliceHeader.Cap", Field, 0, ""},
+		{"SliceHeader.Data", Field, 0, ""},
+		{"SliceHeader.Len", Field, 0, ""},
+		{"SliceOf", Func, 1, "func(t Type) Type"},
+		{"String", Const, 0, ""},
+		{"StringHeader", Type, 0, ""},
+		{"StringHeader.Data", Field, 0, ""},
+		{"StringHeader.Len", Field, 0, ""},
+		{"Struct", Const, 0, ""},
+		{"StructField", Type, 0, ""},
+		{"StructField.Anonymous", Field, 0, ""},
+		{"StructField.Index", Field, 0, ""},
+		{"StructField.Name", Field, 0, ""},
+		{"StructField.Offset", Field, 0, ""},
+		{"StructField.PkgPath", Field, 0, ""},
+		{"StructField.Tag", Field, 0, ""},
+		{"StructField.Type", Field, 0, ""},
+		{"StructOf", Func, 7, "func(fields []StructField) Type"},
+		{"StructTag", Type, 0, ""},
+		{"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
+		{"Type", Type, 0, ""},
+		{"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
+		{"TypeFor", Func, 22, "func[T any]() Type"},
+		{"TypeOf", Func, 0, "func(i any) Type"},
+		{"Uint", Const, 0, ""},
+		{"Uint16", Const, 0, ""},
+		{"Uint32", Const, 0, ""},
+		{"Uint64", Const, 0, ""},
+		{"Uint8", Const, 0, ""},
+		{"Uintptr", Const, 0, ""},
+		{"UnsafePointer", Const, 0, ""},
+		{"Value", Type, 0, ""},
+		{"ValueError", Type, 0, ""},
+		{"ValueError.Kind", Field, 0, ""},
+		{"ValueError.Method", Field, 0, ""},
+		{"ValueOf", Func, 0, "func(i any) Value"},
+		{"VisibleFields", Func, 17, "func(t Type) []StructField"},
+		{"Zero", Func, 0, "func(typ Type) Value"},
+	},
+	"regexp": {
+		{"(*Regexp).AppendText", Method, 24, ""},
+		{"(*Regexp).Copy", Method, 6, ""},
+		{"(*Regexp).Expand", Method, 0, ""},
+		{"(*Regexp).ExpandString", Method, 0, ""},
+		{"(*Regexp).Find", Method, 0, ""},
+		{"(*Regexp).FindAll", Method, 0, ""},
+		{"(*Regexp).FindAllIndex", Method, 0, ""},
+		{"(*Regexp).FindAllString", Method, 0, ""},
+		{"(*Regexp).FindAllStringIndex", Method, 0, ""},
+		{"(*Regexp).FindAllStringSubmatch", Method, 0, ""},
+		{"(*Regexp).FindAllStringSubmatchIndex", Method, 0, ""},
+		{"(*Regexp).FindAllSubmatch", Method, 0, ""},
+		{"(*Regexp).FindAllSubmatchIndex", Method, 0, ""},
+		{"(*Regexp).FindIndex", Method, 0, ""},
+		{"(*Regexp).FindReaderIndex", Method, 0, ""},
+		{"(*Regexp).FindReaderSubmatchIndex", Method, 0, ""},
+		{"(*Regexp).FindString", Method, 0, ""},
+		{"(*Regexp).FindStringIndex", Method, 0, ""},
+		{"(*Regexp).FindStringSubmatch", Method, 0, ""},
+		{"(*Regexp).FindStringSubmatchIndex", Method, 0, ""},
+		{"(*Regexp).FindSubmatch", Method, 0, ""},
+		{"(*Regexp).FindSubmatchIndex", Method, 0, ""},
+		{"(*Regexp).LiteralPrefix", Method, 0, ""},
+		{"(*Regexp).Longest", Method, 1, ""},
+		{"(*Regexp).MarshalText", Method, 21, ""},
+		{"(*Regexp).Match", Method, 0, ""},
+		{"(*Regexp).MatchReader", Method, 0, ""},
+		{"(*Regexp).MatchString", Method, 0, ""},
+		{"(*Regexp).NumSubexp", Method, 0, ""},
+		{"(*Regexp).ReplaceAll", Method, 0, ""},
+		{"(*Regexp).ReplaceAllFunc", Method, 0, ""},
+		{"(*Regexp).ReplaceAllLiteral", Method, 0, ""},
+		{"(*Regexp).ReplaceAllLiteralString", Method, 0, ""},
+		{"(*Regexp).ReplaceAllString", Method, 0, ""},
+		{"(*Regexp).ReplaceAllStringFunc", Method, 0, ""},
+		{"(*Regexp).Split", Method, 1, ""},
+		{"(*Regexp).String", Method, 0, ""},
+		{"(*Regexp).SubexpIndex", Method, 15, ""},
+		{"(*Regexp).SubexpNames", Method, 0, ""},
+		{"(*Regexp).UnmarshalText", Method, 21, ""},
+		{"Compile", Func, 0, "func(expr string) (*Regexp, error)"},
+		{"CompilePOSIX", Func, 0, "func(expr string) (*Regexp, error)"},
+		{"Match", Func, 0, "func(pattern string, b []byte) (matched bool, err error)"},
+		{"MatchReader", Func, 0, "func(pattern string, r io.RuneReader) (matched bool, err error)"},
+		{"MatchString", Func, 0, "func(pattern string, s string) (matched bool, err error)"},
+		{"MustCompile", Func, 0, "func(str string) *Regexp"},
+		{"MustCompilePOSIX", Func, 0, "func(str string) *Regexp"},
+		{"QuoteMeta", Func, 0, "func(s string) string"},
+		{"Regexp", Type, 0, ""},
+	},
+	"regexp/syntax": {
+		{"(*Error).Error", Method, 0, ""},
+		{"(*Inst).MatchEmptyWidth", Method, 0, ""},
+		{"(*Inst).MatchRune", Method, 0, ""},
+		{"(*Inst).MatchRunePos", Method, 3, ""},
+		{"(*Inst).String", Method, 0, ""},
+		{"(*Prog).Prefix", Method, 0, ""},
+		{"(*Prog).StartCond", Method, 0, ""},
+		{"(*Prog).String", Method, 0, ""},
+		{"(*Regexp).CapNames", Method, 0, ""},
+		{"(*Regexp).Equal", Method, 0, ""},
+		{"(*Regexp).MaxCap", Method, 0, ""},
+		{"(*Regexp).Simplify", Method, 0, ""},
+		{"(*Regexp).String", Method, 0, ""},
+		{"(ErrorCode).String", Method, 0, ""},
+		{"(InstOp).String", Method, 3, ""},
+		{"(Op).String", Method, 11, ""},
+		{"ClassNL", Const, 0, ""},
+		{"Compile", Func, 0, "func(re *Regexp) (*Prog, error)"},
+		{"DotNL", Const, 0, ""},
+		{"EmptyBeginLine", Const, 0, ""},
+		{"EmptyBeginText", Const, 0, ""},
+		{"EmptyEndLine", Const, 0, ""},
+		{"EmptyEndText", Const, 0, ""},
+		{"EmptyNoWordBoundary", Const, 0, ""},
+		{"EmptyOp", Type, 0, ""},
+		{"EmptyOpContext", Func, 0, "func(r1 rune, r2 rune) EmptyOp"},
+		{"EmptyWordBoundary", Const, 0, ""},
+		{"ErrInternalError", Const, 0, ""},
+		{"ErrInvalidCharClass", Const, 0, ""},
+		{"ErrInvalidCharRange", Const, 0, ""},
+		{"ErrInvalidEscape", Const, 0, ""},
+		{"ErrInvalidNamedCapture", Const, 0, ""},
+		{"ErrInvalidPerlOp", Const, 0, ""},
+		{"ErrInvalidRepeatOp", Const, 0, ""},
+		{"ErrInvalidRepeatSize", Const, 0, ""},
+		{"ErrInvalidUTF8", Const, 0, ""},
+		{"ErrLarge", Const, 20, ""},
+		{"ErrMissingBracket", Const, 0, ""},
+		{"ErrMissingParen", Const, 0, ""},
+		{"ErrMissingRepeatArgument", Const, 0, ""},
+		{"ErrNestingDepth", Const, 19, ""},
+		{"ErrTrailingBackslash", Const, 0, ""},
+		{"ErrUnexpectedParen", Const, 1, ""},
+		{"Error", Type, 0, ""},
+		{"Error.Code", Field, 0, ""},
+		{"Error.Expr", Field, 0, ""},
+		{"ErrorCode", Type, 0, ""},
+		{"Flags", Type, 0, ""},
+		{"FoldCase", Const, 0, ""},
+		{"Inst", Type, 0, ""},
+		{"Inst.Arg", Field, 0, ""},
+		{"Inst.Op", Field, 0, ""},
+		{"Inst.Out", Field, 0, ""},
+		{"Inst.Rune", Field, 0, ""},
+		{"InstAlt", Const, 0, ""},
+		{"InstAltMatch", Const, 0, ""},
+		{"InstCapture", Const, 0, ""},
+		{"InstEmptyWidth", Const, 0, ""},
+		{"InstFail", Const, 0, ""},
+		{"InstMatch", Const, 0, ""},
+		{"InstNop", Const, 0, ""},
+		{"InstOp", Type, 0, ""},
+		{"InstRune", Const, 0, ""},
+		{"InstRune1", Const, 0, ""},
+		{"InstRuneAny", Const, 0, ""},
+		{"InstRuneAnyNotNL", Const, 0, ""},
+		{"IsWordChar", Func, 0, "func(r rune) bool"},
+		{"Literal", Const, 0, ""},
+		{"MatchNL", Const, 0, ""},
+		{"NonGreedy", Const, 0, ""},
+		{"OneLine", Const, 0, ""},
+		{"Op", Type, 0, ""},
+		{"OpAlternate", Const, 0, ""},
+		{"OpAnyChar", Const, 0, ""},
+		{"OpAnyCharNotNL", Const, 0, ""},
+		{"OpBeginLine", Const, 0, ""},
+		{"OpBeginText", Const, 0, ""},
+		{"OpCapture", Const, 0, ""},
+		{"OpCharClass", Const, 0, ""},
+		{"OpConcat", Const, 0, ""},
+		{"OpEmptyMatch", Const, 0, ""},
+		{"OpEndLine", Const, 0, ""},
+		{"OpEndText", Const, 0, ""},
+		{"OpLiteral", Const, 0, ""},
+		{"OpNoMatch", Const, 0, ""},
+		{"OpNoWordBoundary", Const, 0, ""},
+		{"OpPlus", Const, 0, ""},
+		{"OpQuest", Const, 0, ""},
+		{"OpRepeat", Const, 0, ""},
+		{"OpStar", Const, 0, ""},
+		{"OpWordBoundary", Const, 0, ""},
+		{"POSIX", Const, 0, ""},
+		{"Parse", Func, 0, "func(s string, flags Flags) (*Regexp, error)"},
+		{"Perl", Const, 0, ""},
+		{"PerlX", Const, 0, ""},
+		{"Prog", Type, 0, ""},
+		{"Prog.Inst", Field, 0, ""},
+		{"Prog.NumCap", Field, 0, ""},
+		{"Prog.Start", Field, 0, ""},
+		{"Regexp", Type, 0, ""},
+		{"Regexp.Cap", Field, 0, ""},
+		{"Regexp.Flags", Field, 0, ""},
+		{"Regexp.Max", Field, 0, ""},
+		{"Regexp.Min", Field, 0, ""},
+		{"Regexp.Name", Field, 0, ""},
+		{"Regexp.Op", Field, 0, ""},
+		{"Regexp.Rune", Field, 0, ""},
+		{"Regexp.Rune0", Field, 0, ""},
+		{"Regexp.Sub", Field, 0, ""},
+		{"Regexp.Sub0", Field, 0, ""},
+		{"Simple", Const, 0, ""},
+		{"UnicodeGroups", Const, 0, ""},
+		{"WasDollar", Const, 0, ""},
+	},
+	"runtime": {
+		{"(*BlockProfileRecord).Stack", Method, 1, ""},
+		{"(*Frames).Next", Method, 7, ""},
+		{"(*Func).Entry", Method, 0, ""},
+		{"(*Func).FileLine", Method, 0, ""},
+		{"(*Func).Name", Method, 0, ""},
+		{"(*MemProfileRecord).InUseBytes", Method, 0, ""},
+		{"(*MemProfileRecord).InUseObjects", Method, 0, ""},
+		{"(*MemProfileRecord).Stack", Method, 0, ""},
+		{"(*PanicNilError).Error", Method, 21, ""},
+		{"(*PanicNilError).RuntimeError", Method, 21, ""},
+		{"(*Pinner).Pin", Method, 21, ""},
+		{"(*Pinner).Unpin", Method, 21, ""},
+		{"(*StackRecord).Stack", Method, 0, ""},
+		{"(*TypeAssertionError).Error", Method, 0, ""},
+		{"(*TypeAssertionError).RuntimeError", Method, 0, ""},
+		{"(Cleanup).Stop", Method, 24, ""},
+		{"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"},
+		{"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"},
+		{"BlockProfileRecord", Type, 1, ""},
+		{"BlockProfileRecord.Count", Field, 1, ""},
+		{"BlockProfileRecord.Cycles", Field, 1, ""},
+		{"BlockProfileRecord.StackRecord", Field, 1, ""},
+		{"Breakpoint", Func, 0, "func()"},
+		{"CPUProfile", Func, 0, "func() []byte"},
+		{"Caller", Func, 0, "func(skip int) (pc uintptr, file string, line int, ok bool)"},
+		{"Callers", Func, 0, "func(skip int, pc []uintptr) int"},
+		{"CallersFrames", Func, 7, "func(callers []uintptr) *Frames"},
+		{"Cleanup", Type, 24, ""},
+		{"Compiler", Const, 0, ""},
+		{"Error", Type, 0, ""},
+		{"Frame", Type, 7, ""},
+		{"Frame.Entry", Field, 7, ""},
+		{"Frame.File", Field, 7, ""},
+		{"Frame.Func", Field, 7, ""},
+		{"Frame.Function", Field, 7, ""},
+		{"Frame.Line", Field, 7, ""},
+		{"Frame.PC", Field, 7, ""},
+		{"Frames", Type, 7, ""},
+		{"Func", Type, 0, ""},
+		{"FuncForPC", Func, 0, "func(pc uintptr) *Func"},
+		{"GC", Func, 0, "func()"},
+		{"GOARCH", Const, 0, ""},
+		{"GOMAXPROCS", Func, 0, "func(n int) int"},
+		{"GOOS", Const, 0, ""},
+		{"GOROOT", Func, 0, "func() string"},
+		{"Goexit", Func, 0, "func()"},
+		{"GoroutineProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
+		{"Gosched", Func, 0, "func()"},
+		{"KeepAlive", Func, 7, "func(x any)"},
+		{"LockOSThread", Func, 0, "func()"},
+		{"MemProfile", Func, 0, "func(p []MemProfileRecord, inuseZero bool) (n int, ok bool)"},
+		{"MemProfileRate", Var, 0, ""},
+		{"MemProfileRecord", Type, 0, ""},
+		{"MemProfileRecord.AllocBytes", Field, 0, ""},
+		{"MemProfileRecord.AllocObjects", Field, 0, ""},
+		{"MemProfileRecord.FreeBytes", Field, 0, ""},
+		{"MemProfileRecord.FreeObjects", Field, 0, ""},
+		{"MemProfileRecord.Stack0", Field, 0, ""},
+		{"MemStats", Type, 0, ""},
+		{"MemStats.Alloc", Field, 0, ""},
+		{"MemStats.BuckHashSys", Field, 0, ""},
+		{"MemStats.BySize", Field, 0, ""},
+		{"MemStats.DebugGC", Field, 0, ""},
+		{"MemStats.EnableGC", Field, 0, ""},
+		{"MemStats.Frees", Field, 0, ""},
+		{"MemStats.GCCPUFraction", Field, 5, ""},
+		{"MemStats.GCSys", Field, 2, ""},
+		{"MemStats.HeapAlloc", Field, 0, ""},
+		{"MemStats.HeapIdle", Field, 0, ""},
+		{"MemStats.HeapInuse", Field, 0, ""},
+		{"MemStats.HeapObjects", Field, 0, ""},
+		{"MemStats.HeapReleased", Field, 0, ""},
+		{"MemStats.HeapSys", Field, 0, ""},
+		{"MemStats.LastGC", Field, 0, ""},
+		{"MemStats.Lookups", Field, 0, ""},
+		{"MemStats.MCacheInuse", Field, 0, ""},
+		{"MemStats.MCacheSys", Field, 0, ""},
+		{"MemStats.MSpanInuse", Field, 0, ""},
+		{"MemStats.MSpanSys", Field, 0, ""},
+		{"MemStats.Mallocs", Field, 0, ""},
+		{"MemStats.NextGC", Field, 0, ""},
+		{"MemStats.NumForcedGC", Field, 8, ""},
+		{"MemStats.NumGC", Field, 0, ""},
+		{"MemStats.OtherSys", Field, 2, ""},
+		{"MemStats.PauseEnd", Field, 4, ""},
+		{"MemStats.PauseNs", Field, 0, ""},
+		{"MemStats.PauseTotalNs", Field, 0, ""},
+		{"MemStats.StackInuse", Field, 0, ""},
+		{"MemStats.StackSys", Field, 0, ""},
+		{"MemStats.Sys", Field, 0, ""},
+		{"MemStats.TotalAlloc", Field, 0, ""},
+		{"MutexProfile", Func, 8, "func(p []BlockProfileRecord) (n int, ok bool)"},
+		{"NumCPU", Func, 0, "func() int"},
+		{"NumCgoCall", Func, 0, "func() int64"},
+		{"NumGoroutine", Func, 0, "func() int"},
+		{"PanicNilError", Type, 21, ""},
+		{"Pinner", Type, 21, ""},
+		{"ReadMemStats", Func, 0, "func(m *MemStats)"},
+		{"ReadTrace", Func, 5, "func() (buf []byte)"},
+		{"SetBlockProfileRate", Func, 1, "func(rate int)"},
+		{"SetCPUProfileRate", Func, 0, "func(hz int)"},
+		{"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"},
+		{"SetDefaultGOMAXPROCS", Func, 25, "func()"},
+		{"SetFinalizer", Func, 0, "func(obj any, finalizer any)"},
+		{"SetMutexProfileFraction", Func, 8, "func(rate int) int"},
+		{"Stack", Func, 0, "func(buf []byte, all bool) int"},
+		{"StackRecord", Type, 0, ""},
+		{"StackRecord.Stack0", Field, 0, ""},
+		{"StartTrace", Func, 5, "func() error"},
+		{"StopTrace", Func, 5, "func()"},
+		{"ThreadCreateProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
+		{"TypeAssertionError", Type, 0, ""},
+		{"UnlockOSThread", Func, 0, "func()"},
+		{"Version", Func, 0, "func() string"},
+	},
+	"runtime/cgo": {
+		{"(Handle).Delete", Method, 17, ""},
+		{"(Handle).Value", Method, 17, ""},
+		{"Handle", Type, 17, ""},
+		{"Incomplete", Type, 20, ""},
+		{"NewHandle", Func, 17, ""},
+	},
+	"runtime/coverage": {
+		{"ClearCounters", Func, 20, "func() error"},
+		{"WriteCounters", Func, 20, "func(w io.Writer) error"},
+		{"WriteCountersDir", Func, 20, "func(dir string) error"},
+		{"WriteMeta", Func, 20, "func(w io.Writer) error"},
+		{"WriteMetaDir", Func, 20, "func(dir string) error"},
+	},
+	"runtime/debug": {
+		{"(*BuildInfo).String", Method, 18, ""},
+		{"BuildInfo", Type, 12, ""},
+		{"BuildInfo.Deps", Field, 12, ""},
+		{"BuildInfo.GoVersion", Field, 18, ""},
+		{"BuildInfo.Main", Field, 12, ""},
+		{"BuildInfo.Path", Field, 12, ""},
+		{"BuildInfo.Settings", Field, 18, ""},
+		{"BuildSetting", Type, 18, ""},
+		{"BuildSetting.Key", Field, 18, ""},
+		{"BuildSetting.Value", Field, 18, ""},
+		{"CrashOptions", Type, 23, ""},
+		{"FreeOSMemory", Func, 1, "func()"},
+		{"GCStats", Type, 1, ""},
+		{"GCStats.LastGC", Field, 1, ""},
+		{"GCStats.NumGC", Field, 1, ""},
+		{"GCStats.Pause", Field, 1, ""},
+		{"GCStats.PauseEnd", Field, 4, ""},
+		{"GCStats.PauseQuantiles", Field, 1, ""},
+		{"GCStats.PauseTotal", Field, 1, ""},
+		{"Module", Type, 12, ""},
+		{"Module.Path", Field, 12, ""},
+		{"Module.Replace", Field, 12, ""},
+		{"Module.Sum", Field, 12, ""},
+		{"Module.Version", Field, 12, ""},
+		{"ParseBuildInfo", Func, 18, "func(data string) (bi *BuildInfo, err error)"},
+		{"PrintStack", Func, 0, "func()"},
+		{"ReadBuildInfo", Func, 12, "func() (info *BuildInfo, ok bool)"},
+		{"ReadGCStats", Func, 1, "func(stats *GCStats)"},
+		{"SetCrashOutput", Func, 23, "func(f *os.File, opts CrashOptions) error"},
+		{"SetGCPercent", Func, 1, "func(percent int) int"},
+		{"SetMaxStack", Func, 2, "func(bytes int) int"},
+		{"SetMaxThreads", Func, 2, "func(threads int) int"},
+		{"SetMemoryLimit", Func, 19, "func(limit int64) int64"},
+		{"SetPanicOnFault", Func, 3, "func(enabled bool) bool"},
+		{"SetTraceback", Func, 6, "func(level string)"},
+		{"Stack", Func, 0, "func() []byte"},
+		{"WriteHeapDump", Func, 3, "func(fd uintptr)"},
+	},
+	"runtime/metrics": {
+		{"(Value).Float64", Method, 16, ""},
+		{"(Value).Float64Histogram", Method, 16, ""},
+		{"(Value).Kind", Method, 16, ""},
+		{"(Value).Uint64", Method, 16, ""},
+		{"All", Func, 16, "func() []Description"},
+		{"Description", Type, 16, ""},
+		{"Description.Cumulative", Field, 16, ""},
+		{"Description.Description", Field, 16, ""},
+		{"Description.Kind", Field, 16, ""},
+		{"Description.Name", Field, 16, ""},
+		{"Float64Histogram", Type, 16, ""},
+		{"Float64Histogram.Buckets", Field, 16, ""},
+		{"Float64Histogram.Counts", Field, 16, ""},
+		{"KindBad", Const, 16, ""},
+		{"KindFloat64", Const, 16, ""},
+		{"KindFloat64Histogram", Const, 16, ""},
+		{"KindUint64", Const, 16, ""},
+		{"Read", Func, 16, "func(m []Sample)"},
+		{"Sample", Type, 16, ""},
+		{"Sample.Name", Field, 16, ""},
+		{"Sample.Value", Field, 16, ""},
+		{"Value", Type, 16, ""},
+		{"ValueKind", Type, 16, ""},
+	},
+	"runtime/pprof": {
+		{"(*Profile).Add", Method, 0, ""},
+		{"(*Profile).Count", Method, 0, ""},
+		{"(*Profile).Name", Method, 0, ""},
+		{"(*Profile).Remove", Method, 0, ""},
+		{"(*Profile).WriteTo", Method, 0, ""},
+		{"Do", Func, 9, "func(ctx context.Context, labels LabelSet, f func(context.Context))"},
+		{"ForLabels", Func, 9, "func(ctx context.Context, f func(key string, value string) bool)"},
+		{"Label", Func, 9, "func(ctx context.Context, key string) (string, bool)"},
+		{"LabelSet", Type, 9, ""},
+		{"Labels", Func, 9, "func(args ...string) LabelSet"},
+		{"Lookup", Func, 0, "func(name string) *Profile"},
+		{"NewProfile", Func, 0, "func(name string) *Profile"},
+		{"Profile", Type, 0, ""},
+		{"Profiles", Func, 0, "func() []*Profile"},
+		{"SetGoroutineLabels", Func, 9, "func(ctx context.Context)"},
+		{"StartCPUProfile", Func, 0, "func(w io.Writer) error"},
+		{"StopCPUProfile", Func, 0, "func()"},
+		{"WithLabels", Func, 9, "func(ctx context.Context, labels LabelSet) context.Context"},
+		{"WriteHeapProfile", Func, 0, "func(w io.Writer) error"},
+	},
+	"runtime/trace": {
+		{"(*FlightRecorder).Enabled", Method, 25, ""},
+		{"(*FlightRecorder).Start", Method, 25, ""},
+		{"(*FlightRecorder).Stop", Method, 25, ""},
+		{"(*FlightRecorder).WriteTo", Method, 25, ""},
+		{"(*Region).End", Method, 11, ""},
+		{"(*Task).End", Method, 11, ""},
+		{"FlightRecorder", Type, 25, ""},
+		{"FlightRecorderConfig", Type, 25, ""},
+		{"FlightRecorderConfig.MaxBytes", Field, 25, ""},
+		{"FlightRecorderConfig.MinAge", Field, 25, ""},
+		{"IsEnabled", Func, 11, "func() bool"},
+		{"Log", Func, 11, "func(ctx context.Context, category string, message string)"},
+		{"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"},
+		{"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"},
+		{"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"},
+		{"Region", Type, 11, ""},
+		{"Start", Func, 5, "func(w io.Writer) error"},
+		{"StartRegion", Func, 11, "func(ctx context.Context, regionType string) *Region"},
+		{"Stop", Func, 5, "func()"},
+		{"Task", Type, 11, ""},
+		{"WithRegion", Func, 11, "func(ctx context.Context, regionType string, fn func())"},
+	},
+	"slices": {
+		{"All", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
+		{"AppendSeq", Func, 23, "func[Slice ~[]E, E any](s Slice, seq iter.Seq[E]) Slice"},
+		{"Backward", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
+		{"BinarySearch", Func, 21, "func[S ~[]E, E cmp.Ordered](x S, target E) (int, bool)"},
+		{"BinarySearchFunc", Func, 21, "func[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool)"},
+		{"Chunk", Func, 23, "func[Slice ~[]E, E any](s Slice, n int) iter.Seq[Slice]"},
+		{"Clip", Func, 21, "func[S ~[]E, E any](s S) S"},
+		{"Clone", Func, 21, "func[S ~[]E, E any](s S) S"},
+		{"Collect", Func, 23, "func[E any](seq iter.Seq[E]) []E"},
+		{"Compact", Func, 21, "func[S ~[]E, E comparable](s S) S"},
+		{"CompactFunc", Func, 21, "func[S ~[]E, E any](s S, eq func(E, E) bool) S"},
+		{"Compare", Func, 21, "func[S ~[]E, E cmp.Ordered](s1 S, s2 S) int"},
+		{"CompareFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int"},
+		{"Concat", Func, 22, "func[S ~[]E, E any](slices ...S) S"},
+		{"Contains", Func, 21, "func[S ~[]E, E comparable](s S, v E) bool"},
+		{"ContainsFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) bool"},
+		{"Delete", Func, 21, "func[S ~[]E, E any](s S, i int, j int) S"},
+		{"DeleteFunc", Func, 21, "func[S ~[]E, E any](s S, del func(E) bool) S"},
+		{"Equal", Func, 21, "func[S ~[]E, E comparable](s1 S, s2 S) bool"},
+		{"EqualFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool"},
+		{"Grow", Func, 21, "func[S ~[]E, E any](s S, n int) S"},
+		{"Index", Func, 21, "func[S ~[]E, E comparable](s S, v E) int"},
+		{"IndexFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) int"},
+		{"Insert", Func, 21, "func[S ~[]E, E any](s S, i int, v ...E) S"},
+		{"IsSorted", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) bool"},
+		{"IsSortedFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) bool"},
+		{"Max", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
+		{"MaxFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
+		{"Min", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
+		{"MinFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
+		{"Repeat", Func, 23, "func[S ~[]E, E any](x S, count int) S"},
+		{"Replace", Func, 21, "func[S ~[]E, E any](s S, i int, j int, v ...E) S"},
+		{"Reverse", Func, 21, "func[S ~[]E, E any](s S)"},
+		{"Sort", Func, 21, "func[S ~[]E, E cmp.Ordered](x S)"},
+		{"SortFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
+		{"SortStableFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
+		{"Sorted", Func, 23, "func[E cmp.Ordered](seq iter.Seq[E]) []E"},
+		{"SortedFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
+		{"SortedStableFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
+		{"Values", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq[E]"},
+	},
+	"sort": {
+		{"(Float64Slice).Len", Method, 0, ""},
+		{"(Float64Slice).Less", Method, 0, ""},
+		{"(Float64Slice).Search", Method, 0, ""},
+		{"(Float64Slice).Sort", Method, 0, ""},
+		{"(Float64Slice).Swap", Method, 0, ""},
+		{"(IntSlice).Len", Method, 0, ""},
+		{"(IntSlice).Less", Method, 0, ""},
+		{"(IntSlice).Search", Method, 0, ""},
+		{"(IntSlice).Sort", Method, 0, ""},
+		{"(IntSlice).Swap", Method, 0, ""},
+		{"(StringSlice).Len", Method, 0, ""},
+		{"(StringSlice).Less", Method, 0, ""},
+		{"(StringSlice).Search", Method, 0, ""},
+		{"(StringSlice).Sort", Method, 0, ""},
+		{"(StringSlice).Swap", Method, 0, ""},
+		{"Find", Func, 19, "func(n int, cmp func(int) int) (i int, found bool)"},
+		{"Float64Slice", Type, 0, ""},
+		{"Float64s", Func, 0, "func(x []float64)"},
+		{"Float64sAreSorted", Func, 0, "func(x []float64) bool"},
+		{"IntSlice", Type, 0, ""},
+		{"Interface", Type, 0, ""},
+		{"Ints", Func, 0, "func(x []int)"},
+		{"IntsAreSorted", Func, 0, "func(x []int) bool"},
+		{"IsSorted", Func, 0, "func(data Interface) bool"},
+		{"Reverse", Func, 1, "func(data Interface) Interface"},
+		{"Search", Func, 0, "func(n int, f func(int) bool) int"},
+		{"SearchFloat64s", Func, 0, "func(a []float64, x float64) int"},
+		{"SearchInts", Func, 0, "func(a []int, x int) int"},
+		{"SearchStrings", Func, 0, "func(a []string, x string) int"},
+		{"Slice", Func, 8, "func(x any, less func(i int, j int) bool)"},
+		{"SliceIsSorted", Func, 8, "func(x any, less func(i int, j int) bool) bool"},
+		{"SliceStable", Func, 8, "func(x any, less func(i int, j int) bool)"},
+		{"Sort", Func, 0, "func(data Interface)"},
+		{"Stable", Func, 2, "func(data Interface)"},
+		{"StringSlice", Type, 0, ""},
+		{"Strings", Func, 0, "func(x []string)"},
+		{"StringsAreSorted", Func, 0, "func(x []string) bool"},
+	},
+	"strconv": {
+		{"(*NumError).Error", Method, 0, ""},
+		{"(*NumError).Unwrap", Method, 14, ""},
+		{"AppendBool", Func, 0, "func(dst []byte, b bool) []byte"},
+		{"AppendFloat", Func, 0, "func(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte"},
+		{"AppendInt", Func, 0, "func(dst []byte, i int64, base int) []byte"},
+		{"AppendQuote", Func, 0, "func(dst []byte, s string) []byte"},
+		{"AppendQuoteRune", Func, 0, "func(dst []byte, r rune) []byte"},
+		{"AppendQuoteRuneToASCII", Func, 0, "func(dst []byte, r rune) []byte"},
+		{"AppendQuoteRuneToGraphic", Func, 6, "func(dst []byte, r rune) []byte"},
+		{"AppendQuoteToASCII", Func, 0, "func(dst []byte, s string) []byte"},
+		{"AppendQuoteToGraphic", Func, 6, "func(dst []byte, s string) []byte"},
+		{"AppendUint", Func, 0, "func(dst []byte, i uint64, base int) []byte"},
+		{"Atoi", Func, 0, "func(s string) (int, error)"},
+		{"CanBackquote", Func, 0, "func(s string) bool"},
+		{"ErrRange", Var, 0, ""},
+		{"ErrSyntax", Var, 0, ""},
+		{"FormatBool", Func, 0, "func(b bool) string"},
+		{"FormatComplex", Func, 15, "func(c complex128, fmt byte, prec int, bitSize int) string"},
+		{"FormatFloat", Func, 0, "func(f float64, fmt byte, prec int, bitSize int) string"},
+		{"FormatInt", Func, 0, "func(i int64, base int) string"},
+		{"FormatUint", Func, 0, "func(i uint64, base int) string"},
+		{"IntSize", Const, 0, ""},
+		{"IsGraphic", Func, 6, "func(r rune) bool"},
+		{"IsPrint", Func, 0, "func(r rune) bool"},
+		{"Itoa", Func, 0, "func(i int) string"},
+		{"NumError", Type, 0, ""},
+		{"NumError.Err", Field, 0, ""},
+		{"NumError.Func", Field, 0, ""},
+		{"NumError.Num", Field, 0, ""},
+		{"ParseBool", Func, 0, "func(str string) (bool, error)"},
+		{"ParseComplex", Func, 15, "func(s string, bitSize int) (complex128, error)"},
+		{"ParseFloat", Func, 0, "func(s string, bitSize int) (float64, error)"},
+		{"ParseInt", Func, 0, "func(s string, base int, bitSize int) (i int64, err error)"},
+		{"ParseUint", Func, 0, "func(s string, base int, bitSize int) (uint64, error)"},
+		{"Quote", Func, 0, "func(s string) string"},
+		{"QuoteRune", Func, 0, "func(r rune) string"},
+		{"QuoteRuneToASCII", Func, 0, "func(r rune) string"},
+		{"QuoteRuneToGraphic", Func, 6, "func(r rune) string"},
+		{"QuoteToASCII", Func, 0, "func(s string) string"},
+		{"QuoteToGraphic", Func, 6, "func(s string) string"},
+		{"QuotedPrefix", Func, 17, "func(s string) (string, error)"},
+		{"Unquote", Func, 0, "func(s string) (string, error)"},
+		{"UnquoteChar", Func, 0, "func(s string, quote byte) (value rune, multibyte bool, tail string, err error)"},
+	},
+	"strings": {
+		{"(*Builder).Cap", Method, 12, ""},
+		{"(*Builder).Grow", Method, 10, ""},
+		{"(*Builder).Len", Method, 10, ""},
+		{"(*Builder).Reset", Method, 10, ""},
+		{"(*Builder).String", Method, 10, ""},
+		{"(*Builder).Write", Method, 10, ""},
+		{"(*Builder).WriteByte", Method, 10, ""},
+		{"(*Builder).WriteRune", Method, 10, ""},
+		{"(*Builder).WriteString", Method, 10, ""},
+		{"(*Reader).Len", Method, 0, ""},
+		{"(*Reader).Read", Method, 0, ""},
+		{"(*Reader).ReadAt", Method, 0, ""},
+		{"(*Reader).ReadByte", Method, 0, ""},
+		{"(*Reader).ReadRune", Method, 0, ""},
+		{"(*Reader).Reset", Method, 7, ""},
+		{"(*Reader).Seek", Method, 0, ""},
+		{"(*Reader).Size", Method, 5, ""},
+		{"(*Reader).UnreadByte", Method, 0, ""},
+		{"(*Reader).UnreadRune", Method, 0, ""},
+		{"(*Reader).WriteTo", Method, 1, ""},
+		{"(*Replacer).Replace", Method, 0, ""},
+		{"(*Replacer).WriteString", Method, 0, ""},
+		{"Builder", Type, 10, ""},
+		{"Clone", Func, 18, "func(s string) string"},
+		{"Compare", Func, 5, "func(a string, b string) int"},
+		{"Contains", Func, 0, "func(s string, substr string) bool"},
+		{"ContainsAny", Func, 0, "func(s string, chars string) bool"},
+		{"ContainsFunc", Func, 21, "func(s string, f func(rune) bool) bool"},
+		{"ContainsRune", Func, 0, "func(s string, r rune) bool"},
+		{"Count", Func, 0, "func(s string, substr string) int"},
+		{"Cut", Func, 18, "func(s string, sep string) (before string, after string, found bool)"},
+		{"CutPrefix", Func, 20, "func(s string, prefix string) (after string, found bool)"},
+		{"CutSuffix", Func, 20, "func(s string, suffix string) (before string, found bool)"},
+		{"EqualFold", Func, 0, "func(s string, t string) bool"},
+		{"Fields", Func, 0, "func(s string) []string"},
+		{"FieldsFunc", Func, 0, "func(s string, f func(rune) bool) []string"},
+		{"FieldsFuncSeq", Func, 24, "func(s string, f func(rune) bool) iter.Seq[string]"},
+		{"FieldsSeq", Func, 24, "func(s string) iter.Seq[string]"},
+		{"HasPrefix", Func, 0, "func(s string, prefix string) bool"},
+		{"HasSuffix", Func, 0, "func(s string, suffix string) bool"},
+		{"Index", Func, 0, "func(s string, substr string) int"},
+		{"IndexAny", Func, 0, "func(s string, chars string) int"},
+		{"IndexByte", Func, 2, "func(s string, c byte) int"},
+		{"IndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
+		{"IndexRune", Func, 0, "func(s string, r rune) int"},
+		{"Join", Func, 0, "func(elems []string, sep string) string"},
+		{"LastIndex", Func, 0, "func(s string, substr string) int"},
+		{"LastIndexAny", Func, 0, "func(s string, chars string) int"},
+		{"LastIndexByte", Func, 5, "func(s string, c byte) int"},
+		{"LastIndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
+		{"Lines", Func, 24, "func(s string) iter.Seq[string]"},
+		{"Map", Func, 0, "func(mapping func(rune) rune, s string) string"},
+		{"NewReader", Func, 0, "func(s string) *Reader"},
+		{"NewReplacer", Func, 0, "func(oldnew ...string) *Replacer"},
+		{"Reader", Type, 0, ""},
+		{"Repeat", Func, 0, "func(s string, count int) string"},
+		{"Replace", Func, 0, "func(s string, old string, new string, n int) string"},
+		{"ReplaceAll", Func, 12, "func(s string, old string, new string) string"},
+		{"Replacer", Type, 0, ""},
+		{"Split", Func, 0, "func(s string, sep string) []string"},
+		{"SplitAfter", Func, 0, "func(s string, sep string) []string"},
+		{"SplitAfterN", Func, 0, "func(s string, sep string, n int) []string"},
+		{"SplitAfterSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
+		{"SplitN", Func, 0, "func(s string, sep string, n int) []string"},
+		{"SplitSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
+		{"Title", Func, 0, "func(s string) string"},
+		{"ToLower", Func, 0, "func(s string) string"},
+		{"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+		{"ToTitle", Func, 0, "func(s string) string"},
+		{"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+		{"ToUpper", Func, 0, "func(s string) string"},
+		{"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+		{"ToValidUTF8", Func, 13, "func(s string, replacement string) string"},
+		{"Trim", Func, 0, "func(s string, cutset string) string"},
+		{"TrimFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+		{"TrimLeft", Func, 0, "func(s string, cutset string) string"},
+		{"TrimLeftFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+		{"TrimPrefix", Func, 1, "func(s string, prefix string) string"},
+		{"TrimRight", Func, 0, "func(s string, cutset string) string"},
+		{"TrimRightFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+		{"TrimSpace", Func, 0, "func(s string) string"},
+		{"TrimSuffix", Func, 1, "func(s string, suffix string) string"},
+	},
+	"structs": {
+		{"HostLayout", Type, 23, ""},
+	},
+	"sync": {
+		{"(*Cond).Broadcast", Method, 0, ""},
+		{"(*Cond).Signal", Method, 0, ""},
+		{"(*Cond).Wait", Method, 0, ""},
+		{"(*Map).Clear", Method, 23, ""},
+		{"(*Map).CompareAndDelete", Method, 20, ""},
+		{"(*Map).CompareAndSwap", Method, 20, ""},
+		{"(*Map).Delete", Method, 9, ""},
+		{"(*Map).Load", Method, 9, ""},
+		{"(*Map).LoadAndDelete", Method, 15, ""},
+		{"(*Map).LoadOrStore", Method, 9, ""},
+		{"(*Map).Range", Method, 9, ""},
+		{"(*Map).Store", Method, 9, ""},
+		{"(*Map).Swap", Method, 20, ""},
+		{"(*Mutex).Lock", Method, 0, ""},
+		{"(*Mutex).TryLock", Method, 18, ""},
+		{"(*Mutex).Unlock", Method, 0, ""},
+		{"(*Once).Do", Method, 0, ""},
+		{"(*Pool).Get", Method, 3, ""},
+		{"(*Pool).Put", Method, 3, ""},
+		{"(*RWMutex).Lock", Method, 0, ""},
+		{"(*RWMutex).RLock", Method, 0, ""},
+		{"(*RWMutex).RLocker", Method, 0, ""},
+		{"(*RWMutex).RUnlock", Method, 0, ""},
+		{"(*RWMutex).TryLock", Method, 18, ""},
+		{"(*RWMutex).TryRLock", Method, 18, ""},
+		{"(*RWMutex).Unlock", Method, 0, ""},
+		{"(*WaitGroup).Add", Method, 0, ""},
+		{"(*WaitGroup).Done", Method, 0, ""},
+		{"(*WaitGroup).Go", Method, 25, ""},
+		{"(*WaitGroup).Wait", Method, 0, ""},
+		{"Cond", Type, 0, ""},
+		{"Cond.L", Field, 0, ""},
+		{"Locker", Type, 0, ""},
+		{"Map", Type, 9, ""},
+		{"Mutex", Type, 0, ""},
+		{"NewCond", Func, 0, "func(l Locker) *Cond"},
+		{"Once", Type, 0, ""},
+		{"OnceFunc", Func, 21, "func(f func()) func()"},
+		{"OnceValue", Func, 21, "func[T any](f func() T) func() T"},
+		{"OnceValues", Func, 21, "func[T1, T2 any](f func() (T1, T2)) func() (T1, T2)"},
+		{"Pool", Type, 3, ""},
+		{"Pool.New", Field, 3, ""},
+		{"RWMutex", Type, 0, ""},
+		{"WaitGroup", Type, 0, ""},
+	},
+	"sync/atomic": {
+		{"(*Bool).CompareAndSwap", Method, 19, ""},
+		{"(*Bool).Load", Method, 19, ""},
+		{"(*Bool).Store", Method, 19, ""},
+		{"(*Bool).Swap", Method, 19, ""},
+		{"(*Int32).Add", Method, 19, ""},
+		{"(*Int32).And", Method, 23, ""},
+		{"(*Int32).CompareAndSwap", Method, 19, ""},
+		{"(*Int32).Load", Method, 19, ""},
+		{"(*Int32).Or", Method, 23, ""},
+		{"(*Int32).Store", Method, 19, ""},
+		{"(*Int32).Swap", Method, 19, ""},
+		{"(*Int64).Add", Method, 19, ""},
+		{"(*Int64).And", Method, 23, ""},
+		{"(*Int64).CompareAndSwap", Method, 19, ""},
+		{"(*Int64).Load", Method, 19, ""},
+		{"(*Int64).Or", Method, 23, ""},
+		{"(*Int64).Store", Method, 19, ""},
+		{"(*Int64).Swap", Method, 19, ""},
+		{"(*Pointer).CompareAndSwap", Method, 19, ""},
+		{"(*Pointer).Load", Method, 19, ""},
+		{"(*Pointer).Store", Method, 19, ""},
+		{"(*Pointer).Swap", Method, 19, ""},
+		{"(*Uint32).Add", Method, 19, ""},
+		{"(*Uint32).And", Method, 23, ""},
+		{"(*Uint32).CompareAndSwap", Method, 19, ""},
+		{"(*Uint32).Load", Method, 19, ""},
+		{"(*Uint32).Or", Method, 23, ""},
+		{"(*Uint32).Store", Method, 19, ""},
+		{"(*Uint32).Swap", Method, 19, ""},
+		{"(*Uint64).Add", Method, 19, ""},
+		{"(*Uint64).And", Method, 23, ""},
+		{"(*Uint64).CompareAndSwap", Method, 19, ""},
+		{"(*Uint64).Load", Method, 19, ""},
+		{"(*Uint64).Or", Method, 23, ""},
+		{"(*Uint64).Store", Method, 19, ""},
+		{"(*Uint64).Swap", Method, 19, ""},
+		{"(*Uintptr).Add", Method, 19, ""},
+		{"(*Uintptr).And", Method, 23, ""},
+		{"(*Uintptr).CompareAndSwap", Method, 19, ""},
+		{"(*Uintptr).Load", Method, 19, ""},
+		{"(*Uintptr).Or", Method, 23, ""},
+		{"(*Uintptr).Store", Method, 19, ""},
+		{"(*Uintptr).Swap", Method, 19, ""},
+		{"(*Value).CompareAndSwap", Method, 17, ""},
+		{"(*Value).Load", Method, 4, ""},
+		{"(*Value).Store", Method, 4, ""},
+		{"(*Value).Swap", Method, 17, ""},
+		{"AddInt32", Func, 0, "func(addr *int32, delta int32) (new int32)"},
+		{"AddInt64", Func, 0, "func(addr *int64, delta int64) (new int64)"},
+		{"AddUint32", Func, 0, "func(addr *uint32, delta uint32) (new uint32)"},
+		{"AddUint64", Func, 0, "func(addr *uint64, delta uint64) (new uint64)"},
+		{"AddUintptr", Func, 0, "func(addr *uintptr, delta uintptr) (new uintptr)"},
+		{"AndInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
+		{"AndInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
+		{"AndUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
+		{"AndUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
+		{"AndUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
+		{"Bool", Type, 19, ""},
+		{"CompareAndSwapInt32", Func, 0, "func(addr *int32, old int32, new int32) (swapped bool)"},
+		{"CompareAndSwapInt64", Func, 0, "func(addr *int64, old int64, new int64) (swapped bool)"},
+		{"CompareAndSwapPointer", Func, 0, "func(addr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) (swapped bool)"},
+		{"CompareAndSwapUint32", Func, 0, "func(addr *uint32, old uint32, new uint32) (swapped bool)"},
+		{"CompareAndSwapUint64", Func, 0, "func(addr *uint64, old uint64, new uint64) (swapped bool)"},
+		{"CompareAndSwapUintptr", Func, 0, "func(addr *uintptr, old uintptr, new uintptr) (swapped bool)"},
+		{"Int32", Type, 19, ""},
+		{"Int64", Type, 19, ""},
+		{"LoadInt32", Func, 0, "func(addr *int32) (val int32)"},
+		{"LoadInt64", Func, 0, "func(addr *int64) (val int64)"},
+		{"LoadPointer", Func, 0, "func(addr *unsafe.Pointer) (val unsafe.Pointer)"},
+		{"LoadUint32", Func, 0, "func(addr *uint32) (val uint32)"},
+		{"LoadUint64", Func, 0, "func(addr *uint64) (val uint64)"},
+		{"LoadUintptr", Func, 0, "func(addr *uintptr) (val uintptr)"},
+		{"OrInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
+		{"OrInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
+		{"OrUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
+		{"OrUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
+		{"OrUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
+		{"Pointer", Type, 19, ""},
+		{"StoreInt32", Func, 0, "func(addr *int32, val int32)"},
+		{"StoreInt64", Func, 0, "func(addr *int64, val int64)"},
+		{"StorePointer", Func, 0, "func(addr *unsafe.Pointer, val unsafe.Pointer)"},
+		{"StoreUint32", Func, 0, "func(addr *uint32, val uint32)"},
+		{"StoreUint64", Func, 0, "func(addr *uint64, val uint64)"},
+		{"StoreUintptr", Func, 0, "func(addr *uintptr, val uintptr)"},
+		{"SwapInt32", Func, 2, "func(addr *int32, new int32) (old int32)"},
+		{"SwapInt64", Func, 2, "func(addr *int64, new int64) (old int64)"},
+		{"SwapPointer", Func, 2, "func(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)"},
+		{"SwapUint32", Func, 2, "func(addr *uint32, new uint32) (old uint32)"},
+		{"SwapUint64", Func, 2, "func(addr *uint64, new uint64) (old uint64)"},
+		{"SwapUintptr", Func, 2, "func(addr *uintptr, new uintptr) (old uintptr)"},
+		{"Uint32", Type, 19, ""},
+		{"Uint64", Type, 19, ""},
+		{"Uintptr", Type, 19, ""},
+		{"Value", Type, 4, ""},
+	},
+	"syscall": {
+		{"(*Cmsghdr).SetLen", Method, 0, ""},
+		{"(*DLL).FindProc", Method, 0, ""},
+		{"(*DLL).MustFindProc", Method, 0, ""},
+		{"(*DLL).Release", Method, 0, ""},
+		{"(*DLLError).Error", Method, 0, ""},
+		{"(*DLLError).Unwrap", Method, 16, ""},
+		{"(*Filetime).Nanoseconds", Method, 0, ""},
+		{"(*Iovec).SetLen", Method, 0, ""},
+		{"(*LazyDLL).Handle", Method, 0, ""},
+		{"(*LazyDLL).Load", Method, 0, ""},
+		{"(*LazyDLL).NewProc", Method, 0, ""},
+		{"(*LazyProc).Addr", Method, 0, ""},
+		{"(*LazyProc).Call", Method, 0, ""},
+		{"(*LazyProc).Find", Method, 0, ""},
+		{"(*Msghdr).SetControllen", Method, 0, ""},
+		{"(*Proc).Addr", Method, 0, ""},
+		{"(*Proc).Call", Method, 0, ""},
+		{"(*PtraceRegs).PC", Method, 0, ""},
+		{"(*PtraceRegs).SetPC", Method, 0, ""},
+		{"(*RawSockaddrAny).Sockaddr", Method, 0, ""},
+		{"(*SID).Copy", Method, 0, ""},
+		{"(*SID).Len", Method, 0, ""},
+		{"(*SID).LookupAccount", Method, 0, ""},
+		{"(*SID).String", Method, 0, ""},
+		{"(*Timespec).Nano", Method, 0, ""},
+		{"(*Timespec).Unix", Method, 0, ""},
+		{"(*Timeval).Nano", Method, 0, ""},
+		{"(*Timeval).Nanoseconds", Method, 0, ""},
+		{"(*Timeval).Unix", Method, 0, ""},
+		{"(Errno).Error", Method, 0, ""},
+		{"(Errno).Is", Method, 13, ""},
+		{"(Errno).Temporary", Method, 0, ""},
+		{"(Errno).Timeout", Method, 0, ""},
+		{"(Signal).Signal", Method, 0, ""},
+		{"(Signal).String", Method, 0, ""},
+		{"(Token).Close", Method, 0, ""},
+		{"(Token).GetTokenPrimaryGroup", Method, 0, ""},
+		{"(Token).GetTokenUser", Method, 0, ""},
+		{"(Token).GetUserProfileDirectory", Method, 0, ""},
+		{"(WaitStatus).Continued", Method, 0, ""},
+		{"(WaitStatus).CoreDump", Method, 0, ""},
+		{"(WaitStatus).ExitStatus", Method, 0, ""},
+		{"(WaitStatus).Exited", Method, 0, ""},
+		{"(WaitStatus).Signal", Method, 0, ""},
+		{"(WaitStatus).Signaled", Method, 0, ""},
+		{"(WaitStatus).StopSignal", Method, 0, ""},
+		{"(WaitStatus).Stopped", Method, 0, ""},
+		{"(WaitStatus).TrapCause", Method, 0, ""},
+		{"AF_ALG", Const, 0, ""},
+		{"AF_APPLETALK", Const, 0, ""},
+		{"AF_ARP", Const, 0, ""},
+		{"AF_ASH", Const, 0, ""},
+		{"AF_ATM", Const, 0, ""},
+		{"AF_ATMPVC", Const, 0, ""},
+		{"AF_ATMSVC", Const, 0, ""},
+		{"AF_AX25", Const, 0, ""},
+		{"AF_BLUETOOTH", Const, 0, ""},
+		{"AF_BRIDGE", Const, 0, ""},
+		{"AF_CAIF", Const, 0, ""},
+		{"AF_CAN", Const, 0, ""},
+		{"AF_CCITT", Const, 0, ""},
+		{"AF_CHAOS", Const, 0, ""},
+		{"AF_CNT", Const, 0, ""},
+		{"AF_COIP", Const, 0, ""},
+		{"AF_DATAKIT", Const, 0, ""},
+		{"AF_DECnet", Const, 0, ""},
+		{"AF_DLI", Const, 0, ""},
+		{"AF_E164", Const, 0, ""},
+		{"AF_ECMA", Const, 0, ""},
+		{"AF_ECONET", Const, 0, ""},
+		{"AF_ENCAP", Const, 1, ""},
+		{"AF_FILE", Const, 0, ""},
+		{"AF_HYLINK", Const, 0, ""},
+		{"AF_IEEE80211", Const, 0, ""},
+		{"AF_IEEE802154", Const, 0, ""},
+		{"AF_IMPLINK", Const, 0, ""},
+		{"AF_INET", Const, 0, ""},
+		{"AF_INET6", Const, 0, ""},
+		{"AF_INET6_SDP", Const, 3, ""},
+		{"AF_INET_SDP", Const, 3, ""},
+		{"AF_IPX", Const, 0, ""},
+		{"AF_IRDA", Const, 0, ""},
+		{"AF_ISDN", Const, 0, ""},
+		{"AF_ISO", Const, 0, ""},
+		{"AF_IUCV", Const, 0, ""},
+		{"AF_KEY", Const, 0, ""},
+		{"AF_LAT", Const, 0, ""},
+		{"AF_LINK", Const, 0, ""},
+		{"AF_LLC", Const, 0, ""},
+		{"AF_LOCAL", Const, 0, ""},
+		{"AF_MAX", Const, 0, ""},
+		{"AF_MPLS", Const, 1, ""},
+		{"AF_NATM", Const, 0, ""},
+		{"AF_NDRV", Const, 0, ""},
+		{"AF_NETBEUI", Const, 0, ""},
+		{"AF_NETBIOS", Const, 0, ""},
+		{"AF_NETGRAPH", Const, 0, ""},
+		{"AF_NETLINK", Const, 0, ""},
+		{"AF_NETROM", Const, 0, ""},
+		{"AF_NS", Const, 0, ""},
+		{"AF_OROUTE", Const, 1, ""},
+		{"AF_OSI", Const, 0, ""},
+		{"AF_PACKET", Const, 0, ""},
+		{"AF_PHONET", Const, 0, ""},
+		{"AF_PPP", Const, 0, ""},
+		{"AF_PPPOX", Const, 0, ""},
+		{"AF_PUP", Const, 0, ""},
+		{"AF_RDS", Const, 0, ""},
+		{"AF_RESERVED_36", Const, 0, ""},
+		{"AF_ROSE", Const, 0, ""},
+		{"AF_ROUTE", Const, 0, ""},
+		{"AF_RXRPC", Const, 0, ""},
+		{"AF_SCLUSTER", Const, 0, ""},
+		{"AF_SECURITY", Const, 0, ""},
+		{"AF_SIP", Const, 0, ""},
+		{"AF_SLOW", Const, 0, ""},
+		{"AF_SNA", Const, 0, ""},
+		{"AF_SYSTEM", Const, 0, ""},
+		{"AF_TIPC", Const, 0, ""},
+		{"AF_UNIX", Const, 0, ""},
+		{"AF_UNSPEC", Const, 0, ""},
+		{"AF_UTUN", Const, 16, ""},
+		{"AF_VENDOR00", Const, 0, ""},
+		{"AF_VENDOR01", Const, 0, ""},
+		{"AF_VENDOR02", Const, 0, ""},
+		{"AF_VENDOR03", Const, 0, ""},
+		{"AF_VENDOR04", Const, 0, ""},
+		{"AF_VENDOR05", Const, 0, ""},
+		{"AF_VENDOR06", Const, 0, ""},
+		{"AF_VENDOR07", Const, 0, ""},
+		{"AF_VENDOR08", Const, 0, ""},
+		{"AF_VENDOR09", Const, 0, ""},
+		{"AF_VENDOR10", Const, 0, ""},
+		{"AF_VENDOR11", Const, 0, ""},
+		{"AF_VENDOR12", Const, 0, ""},
+		{"AF_VENDOR13", Const, 0, ""},
+		{"AF_VENDOR14", Const, 0, ""},
+		{"AF_VENDOR15", Const, 0, ""},
+		{"AF_VENDOR16", Const, 0, ""},
+		{"AF_VENDOR17", Const, 0, ""},
+		{"AF_VENDOR18", Const, 0, ""},
+		{"AF_VENDOR19", Const, 0, ""},
+		{"AF_VENDOR20", Const, 0, ""},
+		{"AF_VENDOR21", Const, 0, ""},
+		{"AF_VENDOR22", Const, 0, ""},
+		{"AF_VENDOR23", Const, 0, ""},
+		{"AF_VENDOR24", Const, 0, ""},
+		{"AF_VENDOR25", Const, 0, ""},
+		{"AF_VENDOR26", Const, 0, ""},
+		{"AF_VENDOR27", Const, 0, ""},
+		{"AF_VENDOR28", Const, 0, ""},
+		{"AF_VENDOR29", Const, 0, ""},
+		{"AF_VENDOR30", Const, 0, ""},
+		{"AF_VENDOR31", Const, 0, ""},
+		{"AF_VENDOR32", Const, 0, ""},
+		{"AF_VENDOR33", Const, 0, ""},
+		{"AF_VENDOR34", Const, 0, ""},
+		{"AF_VENDOR35", Const, 0, ""},
+		{"AF_VENDOR36", Const, 0, ""},
+		{"AF_VENDOR37", Const, 0, ""},
+		{"AF_VENDOR38", Const, 0, ""},
+		{"AF_VENDOR39", Const, 0, ""},
+		{"AF_VENDOR40", Const, 0, ""},
+		{"AF_VENDOR41", Const, 0, ""},
+		{"AF_VENDOR42", Const, 0, ""},
+		{"AF_VENDOR43", Const, 0, ""},
+		{"AF_VENDOR44", Const, 0, ""},
+		{"AF_VENDOR45", Const, 0, ""},
+		{"AF_VENDOR46", Const, 0, ""},
+		{"AF_VENDOR47", Const, 0, ""},
+		{"AF_WANPIPE", Const, 0, ""},
+		{"AF_X25", Const, 0, ""},
+		{"AI_CANONNAME", Const, 1, ""},
+		{"AI_NUMERICHOST", Const, 1, ""},
+		{"AI_PASSIVE", Const, 1, ""},
+		{"APPLICATION_ERROR", Const, 0, ""},
+		{"ARPHRD_ADAPT", Const, 0, ""},
+		{"ARPHRD_APPLETLK", Const, 0, ""},
+		{"ARPHRD_ARCNET", Const, 0, ""},
+		{"ARPHRD_ASH", Const, 0, ""},
+		{"ARPHRD_ATM", Const, 0, ""},
+		{"ARPHRD_AX25", Const, 0, ""},
+		{"ARPHRD_BIF", Const, 0, ""},
+		{"ARPHRD_CHAOS", Const, 0, ""},
+		{"ARPHRD_CISCO", Const, 0, ""},
+		{"ARPHRD_CSLIP", Const, 0, ""},
+		{"ARPHRD_CSLIP6", Const, 0, ""},
+		{"ARPHRD_DDCMP", Const, 0, ""},
+		{"ARPHRD_DLCI", Const, 0, ""},
+		{"ARPHRD_ECONET", Const, 0, ""},
+		{"ARPHRD_EETHER", Const, 0, ""},
+		{"ARPHRD_ETHER", Const, 0, ""},
+		{"ARPHRD_EUI64", Const, 0, ""},
+		{"ARPHRD_FCAL", Const, 0, ""},
+		{"ARPHRD_FCFABRIC", Const, 0, ""},
+		{"ARPHRD_FCPL", Const, 0, ""},
+		{"ARPHRD_FCPP", Const, 0, ""},
+		{"ARPHRD_FDDI", Const, 0, ""},
+		{"ARPHRD_FRAD", Const, 0, ""},
+		{"ARPHRD_FRELAY", Const, 1, ""},
+		{"ARPHRD_HDLC", Const, 0, ""},
+		{"ARPHRD_HIPPI", Const, 0, ""},
+		{"ARPHRD_HWX25", Const, 0, ""},
+		{"ARPHRD_IEEE1394", Const, 0, ""},
+		{"ARPHRD_IEEE802", Const, 0, ""},
+		{"ARPHRD_IEEE80211", Const, 0, ""},
+		{"ARPHRD_IEEE80211_PRISM", Const, 0, ""},
+		{"ARPHRD_IEEE80211_RADIOTAP", Const, 0, ""},
+		{"ARPHRD_IEEE802154", Const, 0, ""},
+		{"ARPHRD_IEEE802154_PHY", Const, 0, ""},
+		{"ARPHRD_IEEE802_TR", Const, 0, ""},
+		{"ARPHRD_INFINIBAND", Const, 0, ""},
+		{"ARPHRD_IPDDP", Const, 0, ""},
+		{"ARPHRD_IPGRE", Const, 0, ""},
+		{"ARPHRD_IRDA", Const, 0, ""},
+		{"ARPHRD_LAPB", Const, 0, ""},
+		{"ARPHRD_LOCALTLK", Const, 0, ""},
+		{"ARPHRD_LOOPBACK", Const, 0, ""},
+		{"ARPHRD_METRICOM", Const, 0, ""},
+		{"ARPHRD_NETROM", Const, 0, ""},
+		{"ARPHRD_NONE", Const, 0, ""},
+		{"ARPHRD_PIMREG", Const, 0, ""},
+		{"ARPHRD_PPP", Const, 0, ""},
+		{"ARPHRD_PRONET", Const, 0, ""},
+		{"ARPHRD_RAWHDLC", Const, 0, ""},
+		{"ARPHRD_ROSE", Const, 0, ""},
+		{"ARPHRD_RSRVD", Const, 0, ""},
+		{"ARPHRD_SIT", Const, 0, ""},
+		{"ARPHRD_SKIP", Const, 0, ""},
+		{"ARPHRD_SLIP", Const, 0, ""},
+		{"ARPHRD_SLIP6", Const, 0, ""},
+		{"ARPHRD_STRIP", Const, 1, ""},
+		{"ARPHRD_TUNNEL", Const, 0, ""},
+		{"ARPHRD_TUNNEL6", Const, 0, ""},
+		{"ARPHRD_VOID", Const, 0, ""},
+		{"ARPHRD_X25", Const, 0, ""},
+		{"AUTHTYPE_CLIENT", Const, 0, ""},
+		{"AUTHTYPE_SERVER", Const, 0, ""},
+		{"Accept", Func, 0, "func(fd int) (nfd int, sa Sockaddr, err error)"},
+		{"Accept4", Func, 1, "func(fd int, flags int) (nfd int, sa Sockaddr, err error)"},
+		{"AcceptEx", Func, 0, ""},
+		{"Access", Func, 0, "func(path string, mode uint32) (err error)"},
+		{"Acct", Func, 0, "func(path string) (err error)"},
+		{"AddrinfoW", Type, 1, ""},
+		{"AddrinfoW.Addr", Field, 1, ""},
+		{"AddrinfoW.Addrlen", Field, 1, ""},
+		{"AddrinfoW.Canonname", Field, 1, ""},
+		{"AddrinfoW.Family", Field, 1, ""},
+		{"AddrinfoW.Flags", Field, 1, ""},
+		{"AddrinfoW.Next", Field, 1, ""},
+		{"AddrinfoW.Protocol", Field, 1, ""},
+		{"AddrinfoW.Socktype", Field, 1, ""},
+		{"Adjtime", Func, 0, ""},
+		{"Adjtimex", Func, 0, "func(buf *Timex) (state int, err error)"},
+		{"AllThreadsSyscall", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+		{"AllThreadsSyscall6", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+		{"AttachLsf", Func, 0, "func(fd int, i []SockFilter) error"},
+		{"B0", Const, 0, ""},
+		{"B1000000", Const, 0, ""},
+		{"B110", Const, 0, ""},
+		{"B115200", Const, 0, ""},
+		{"B1152000", Const, 0, ""},
+		{"B1200", Const, 0, ""},
+		{"B134", Const, 0, ""},
+		{"B14400", Const, 1, ""},
+		{"B150", Const, 0, ""},
+		{"B1500000", Const, 0, ""},
+		{"B1800", Const, 0, ""},
+		{"B19200", Const, 0, ""},
+		{"B200", Const, 0, ""},
+		{"B2000000", Const, 0, ""},
+		{"B230400", Const, 0, ""},
+		{"B2400", Const, 0, ""},
+		{"B2500000", Const, 0, ""},
+		{"B28800", Const, 1, ""},
+		{"B300", Const, 0, ""},
+		{"B3000000", Const, 0, ""},
+		{"B3500000", Const, 0, ""},
+		{"B38400", Const, 0, ""},
+		{"B4000000", Const, 0, ""},
+		{"B460800", Const, 0, ""},
+		{"B4800", Const, 0, ""},
+		{"B50", Const, 0, ""},
+		{"B500000", Const, 0, ""},
+		{"B57600", Const, 0, ""},
+		{"B576000", Const, 0, ""},
+		{"B600", Const, 0, ""},
+		{"B7200", Const, 1, ""},
+		{"B75", Const, 0, ""},
+		{"B76800", Const, 1, ""},
+		{"B921600", Const, 0, ""},
+		{"B9600", Const, 0, ""},
+		{"BASE_PROTOCOL", Const, 2, ""},
+		{"BIOCFEEDBACK", Const, 0, ""},
+		{"BIOCFLUSH", Const, 0, ""},
+		{"BIOCGBLEN", Const, 0, ""},
+		{"BIOCGDIRECTION", Const, 0, ""},
+		{"BIOCGDIRFILT", Const, 1, ""},
+		{"BIOCGDLT", Const, 0, ""},
+		{"BIOCGDLTLIST", Const, 0, ""},
+		{"BIOCGETBUFMODE", Const, 0, ""},
+		{"BIOCGETIF", Const, 0, ""},
+		{"BIOCGETZMAX", Const, 0, ""},
+		{"BIOCGFEEDBACK", Const, 1, ""},
+		{"BIOCGFILDROP", Const, 1, ""},
+		{"BIOCGHDRCMPLT", Const, 0, ""},
+		{"BIOCGRSIG", Const, 0, ""},
+		{"BIOCGRTIMEOUT", Const, 0, ""},
+		{"BIOCGSEESENT", Const, 0, ""},
+		{"BIOCGSTATS", Const, 0, ""},
+		{"BIOCGSTATSOLD", Const, 1, ""},
+		{"BIOCGTSTAMP", Const, 1, ""},
+		{"BIOCIMMEDIATE", Const, 0, ""},
+		{"BIOCLOCK", Const, 0, ""},
+		{"BIOCPROMISC", Const, 0, ""},
+		{"BIOCROTZBUF", Const, 0, ""},
+		{"BIOCSBLEN", Const, 0, ""},
+		{"BIOCSDIRECTION", Const, 0, ""},
+		{"BIOCSDIRFILT", Const, 1, ""},
+		{"BIOCSDLT", Const, 0, ""},
+		{"BIOCSETBUFMODE", Const, 0, ""},
+		{"BIOCSETF", Const, 0, ""},
+		{"BIOCSETFNR", Const, 0, ""},
+		{"BIOCSETIF", Const, 0, ""},
+		{"BIOCSETWF", Const, 0, ""},
+		{"BIOCSETZBUF", Const, 0, ""},
+		{"BIOCSFEEDBACK", Const, 1, ""},
+		{"BIOCSFILDROP", Const, 1, ""},
+		{"BIOCSHDRCMPLT", Const, 0, ""},
+		{"BIOCSRSIG", Const, 0, ""},
+		{"BIOCSRTIMEOUT", Const, 0, ""},
+		{"BIOCSSEESENT", Const, 0, ""},
+		{"BIOCSTCPF", Const, 1, ""},
+		{"BIOCSTSTAMP", Const, 1, ""},
+		{"BIOCSUDPF", Const, 1, ""},
+		{"BIOCVERSION", Const, 0, ""},
+		{"BPF_A", Const, 0, ""},
+		{"BPF_ABS", Const, 0, ""},
+		{"BPF_ADD", Const, 0, ""},
+		{"BPF_ALIGNMENT", Const, 0, ""},
+		{"BPF_ALIGNMENT32", Const, 1, ""},
+		{"BPF_ALU", Const, 0, ""},
+		{"BPF_AND", Const, 0, ""},
+		{"BPF_B", Const, 0, ""},
+		{"BPF_BUFMODE_BUFFER", Const, 0, ""},
+		{"BPF_BUFMODE_ZBUF", Const, 0, ""},
+		{"BPF_DFLTBUFSIZE", Const, 1, ""},
+		{"BPF_DIRECTION_IN", Const, 1, ""},
+		{"BPF_DIRECTION_OUT", Const, 1, ""},
+		{"BPF_DIV", Const, 0, ""},
+		{"BPF_H", Const, 0, ""},
+		{"BPF_IMM", Const, 0, ""},
+		{"BPF_IND", Const, 0, ""},
+		{"BPF_JA", Const, 0, ""},
+		{"BPF_JEQ", Const, 0, ""},
+		{"BPF_JGE", Const, 0, ""},
+		{"BPF_JGT", Const, 0, ""},
+		{"BPF_JMP", Const, 0, ""},
+		{"BPF_JSET", Const, 0, ""},
+		{"BPF_K", Const, 0, ""},
+		{"BPF_LD", Const, 0, ""},
+		{"BPF_LDX", Const, 0, ""},
+		{"BPF_LEN", Const, 0, ""},
+		{"BPF_LSH", Const, 0, ""},
+		{"BPF_MAJOR_VERSION", Const, 0, ""},
+		{"BPF_MAXBUFSIZE", Const, 0, ""},
+		{"BPF_MAXINSNS", Const, 0, ""},
+		{"BPF_MEM", Const, 0, ""},
+		{"BPF_MEMWORDS", Const, 0, ""},
+		{"BPF_MINBUFSIZE", Const, 0, ""},
+		{"BPF_MINOR_VERSION", Const, 0, ""},
+		{"BPF_MISC", Const, 0, ""},
+		{"BPF_MSH", Const, 0, ""},
+		{"BPF_MUL", Const, 0, ""},
+		{"BPF_NEG", Const, 0, ""},
+		{"BPF_OR", Const, 0, ""},
+		{"BPF_RELEASE", Const, 0, ""},
+		{"BPF_RET", Const, 0, ""},
+		{"BPF_RSH", Const, 0, ""},
+		{"BPF_ST", Const, 0, ""},
+		{"BPF_STX", Const, 0, ""},
+		{"BPF_SUB", Const, 0, ""},
+		{"BPF_TAX", Const, 0, ""},
+		{"BPF_TXA", Const, 0, ""},
+		{"BPF_T_BINTIME", Const, 1, ""},
+		{"BPF_T_BINTIME_FAST", Const, 1, ""},
+		{"BPF_T_BINTIME_MONOTONIC", Const, 1, ""},
+		{"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1, ""},
+		{"BPF_T_FAST", Const, 1, ""},
+		{"BPF_T_FLAG_MASK", Const, 1, ""},
+		{"BPF_T_FORMAT_MASK", Const, 1, ""},
+		{"BPF_T_MICROTIME", Const, 1, ""},
+		{"BPF_T_MICROTIME_FAST", Const, 1, ""},
+		{"BPF_T_MICROTIME_MONOTONIC", Const, 1, ""},
+		{"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1, ""},
+		{"BPF_T_MONOTONIC", Const, 1, ""},
+		{"BPF_T_MONOTONIC_FAST", Const, 1, ""},
+		{"BPF_T_NANOTIME", Const, 1, ""},
+		{"BPF_T_NANOTIME_FAST", Const, 1, ""},
+		{"BPF_T_NANOTIME_MONOTONIC", Const, 1, ""},
+		{"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1, ""},
+		{"BPF_T_NONE", Const, 1, ""},
+		{"BPF_T_NORMAL", Const, 1, ""},
+		{"BPF_W", Const, 0, ""},
+		{"BPF_X", Const, 0, ""},
+		{"BRKINT", Const, 0, ""},
+		{"Bind", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
+		{"BindToDevice", Func, 0, "func(fd int, device string) (err error)"},
+		{"BpfBuflen", Func, 0, ""},
+		{"BpfDatalink", Func, 0, ""},
+		{"BpfHdr", Type, 0, ""},
+		{"BpfHdr.Caplen", Field, 0, ""},
+		{"BpfHdr.Datalen", Field, 0, ""},
+		{"BpfHdr.Hdrlen", Field, 0, ""},
+		{"BpfHdr.Pad_cgo_0", Field, 0, ""},
+		{"BpfHdr.Tstamp", Field, 0, ""},
+		{"BpfHeadercmpl", Func, 0, ""},
+		{"BpfInsn", Type, 0, ""},
+		{"BpfInsn.Code", Field, 0, ""},
+		{"BpfInsn.Jf", Field, 0, ""},
+		{"BpfInsn.Jt", Field, 0, ""},
+		{"BpfInsn.K", Field, 0, ""},
+		{"BpfInterface", Func, 0, ""},
+		{"BpfJump", Func, 0, ""},
+		{"BpfProgram", Type, 0, ""},
+		{"BpfProgram.Insns", Field, 0, ""},
+		{"BpfProgram.Len", Field, 0, ""},
+		{"BpfProgram.Pad_cgo_0", Field, 0, ""},
+		{"BpfStat", Type, 0, ""},
+		{"BpfStat.Capt", Field, 2, ""},
+		{"BpfStat.Drop", Field, 0, ""},
+		{"BpfStat.Padding", Field, 2, ""},
+		{"BpfStat.Recv", Field, 0, ""},
+		{"BpfStats", Func, 0, ""},
+		{"BpfStmt", Func, 0, ""},
+		{"BpfTimeout", Func, 0, ""},
+		{"BpfTimeval", Type, 2, ""},
+		{"BpfTimeval.Sec", Field, 2, ""},
+		{"BpfTimeval.Usec", Field, 2, ""},
+		{"BpfVersion", Type, 0, ""},
+		{"BpfVersion.Major", Field, 0, ""},
+		{"BpfVersion.Minor", Field, 0, ""},
+		{"BpfZbuf", Type, 0, ""},
+		{"BpfZbuf.Bufa", Field, 0, ""},
+		{"BpfZbuf.Bufb", Field, 0, ""},
+		{"BpfZbuf.Buflen", Field, 0, ""},
+		{"BpfZbufHeader", Type, 0, ""},
+		{"BpfZbufHeader.Kernel_gen", Field, 0, ""},
+		{"BpfZbufHeader.Kernel_len", Field, 0, ""},
+		{"BpfZbufHeader.User_gen", Field, 0, ""},
+		{"BpfZbufHeader.X_bzh_pad", Field, 0, ""},
+		{"ByHandleFileInformation", Type, 0, ""},
+		{"ByHandleFileInformation.CreationTime", Field, 0, ""},
+		{"ByHandleFileInformation.FileAttributes", Field, 0, ""},
+		{"ByHandleFileInformation.FileIndexHigh", Field, 0, ""},
+		{"ByHandleFileInformation.FileIndexLow", Field, 0, ""},
+		{"ByHandleFileInformation.FileSizeHigh", Field, 0, ""},
+		{"ByHandleFileInformation.FileSizeLow", Field, 0, ""},
+		{"ByHandleFileInformation.LastAccessTime", Field, 0, ""},
+		{"ByHandleFileInformation.LastWriteTime", Field, 0, ""},
+		{"ByHandleFileInformation.NumberOfLinks", Field, 0, ""},
+		{"ByHandleFileInformation.VolumeSerialNumber", Field, 0, ""},
+		{"BytePtrFromString", Func, 1, "func(s string) (*byte, error)"},
+		{"ByteSliceFromString", Func, 1, "func(s string) ([]byte, error)"},
+		{"CCR0_FLUSH", Const, 1, ""},
+		{"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_BASE", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_EV", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_NT_AUTH", Const, 0, ""},
+		{"CERT_CHAIN_POLICY_SSL", Const, 0, ""},
+		{"CERT_E_CN_NO_MATCH", Const, 0, ""},
+		{"CERT_E_EXPIRED", Const, 0, ""},
+		{"CERT_E_PURPOSE", Const, 0, ""},
+		{"CERT_E_ROLE", Const, 0, ""},
+		{"CERT_E_UNTRUSTEDROOT", Const, 0, ""},
+		{"CERT_STORE_ADD_ALWAYS", Const, 0, ""},
+		{"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0, ""},
+		{"CERT_STORE_PROV_MEMORY", Const, 0, ""},
+		{"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0, ""},
+		{"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0, ""},
+		{"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0, ""},
+		{"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0, ""},
+		{"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0, ""},
+		{"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0, ""},
+		{"CERT_TRUST_INVALID_EXTENSION", Const, 0, ""},
+		{"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0, ""},
+		{"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0, ""},
+		{"CERT_TRUST_IS_CYCLIC", Const, 0, ""},
+		{"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0, ""},
+		{"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0, ""},
+		{"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0, ""},
+		{"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0, ""},
+		{"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0, ""},
+		{"CERT_TRUST_IS_REVOKED", Const, 0, ""},
+		{"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0, ""},
+		{"CERT_TRUST_NO_ERROR", Const, 0, ""},
+		{"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0, ""},
+		{"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0, ""},
+		{"CFLUSH", Const, 1, ""},
+		{"CLOCAL", Const, 0, ""},
+		{"CLONE_CHILD_CLEARTID", Const, 2, ""},
+		{"CLONE_CHILD_SETTID", Const, 2, ""},
+		{"CLONE_CLEAR_SIGHAND", Const, 20, ""},
+		{"CLONE_CSIGNAL", Const, 3, ""},
+		{"CLONE_DETACHED", Const, 2, ""},
+		{"CLONE_FILES", Const, 2, ""},
+		{"CLONE_FS", Const, 2, ""},
+		{"CLONE_INTO_CGROUP", Const, 20, ""},
+		{"CLONE_IO", Const, 2, ""},
+		{"CLONE_NEWCGROUP", Const, 20, ""},
+		{"CLONE_NEWIPC", Const, 2, ""},
+		{"CLONE_NEWNET", Const, 2, ""},
+		{"CLONE_NEWNS", Const, 2, ""},
+		{"CLONE_NEWPID", Const, 2, ""},
+		{"CLONE_NEWTIME", Const, 20, ""},
+		{"CLONE_NEWUSER", Const, 2, ""},
+		{"CLONE_NEWUTS", Const, 2, ""},
+		{"CLONE_PARENT", Const, 2, ""},
+		{"CLONE_PARENT_SETTID", Const, 2, ""},
+		{"CLONE_PID", Const, 3, ""},
+		{"CLONE_PIDFD", Const, 20, ""},
+		{"CLONE_PTRACE", Const, 2, ""},
+		{"CLONE_SETTLS", Const, 2, ""},
+		{"CLONE_SIGHAND", Const, 2, ""},
+		{"CLONE_SYSVSEM", Const, 2, ""},
+		{"CLONE_THREAD", Const, 2, ""},
+		{"CLONE_UNTRACED", Const, 2, ""},
+		{"CLONE_VFORK", Const, 2, ""},
+		{"CLONE_VM", Const, 2, ""},
+		{"CPUID_CFLUSH", Const, 1, ""},
+		{"CREAD", Const, 0, ""},
+		{"CREATE_ALWAYS", Const, 0, ""},
+		{"CREATE_NEW", Const, 0, ""},
+		{"CREATE_NEW_PROCESS_GROUP", Const, 1, ""},
+		{"CREATE_UNICODE_ENVIRONMENT", Const, 0, ""},
+		{"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0, ""},
+		{"CRYPT_DELETEKEYSET", Const, 0, ""},
+		{"CRYPT_MACHINE_KEYSET", Const, 0, ""},
+		{"CRYPT_NEWKEYSET", Const, 0, ""},
+		{"CRYPT_SILENT", Const, 0, ""},
+		{"CRYPT_VERIFYCONTEXT", Const, 0, ""},
+		{"CS5", Const, 0, ""},
+		{"CS6", Const, 0, ""},
+		{"CS7", Const, 0, ""},
+		{"CS8", Const, 0, ""},
+		{"CSIZE", Const, 0, ""},
+		{"CSTART", Const, 1, ""},
+		{"CSTATUS", Const, 1, ""},
+		{"CSTOP", Const, 1, ""},
+		{"CSTOPB", Const, 0, ""},
+		{"CSUSP", Const, 1, ""},
+		{"CTL_MAXNAME", Const, 0, ""},
+		{"CTL_NET", Const, 0, ""},
+		{"CTL_QUERY", Const, 1, ""},
+		{"CTRL_BREAK_EVENT", Const, 1, ""},
+		{"CTRL_CLOSE_EVENT", Const, 14, ""},
+		{"CTRL_C_EVENT", Const, 1, ""},
+		{"CTRL_LOGOFF_EVENT", Const, 14, ""},
+		{"CTRL_SHUTDOWN_EVENT", Const, 14, ""},
+		{"CancelIo", Func, 0, ""},
+		{"CancelIoEx", Func, 1, ""},
+		{"CertAddCertificateContextToStore", Func, 0, ""},
+		{"CertChainContext", Type, 0, ""},
+		{"CertChainContext.ChainCount", Field, 0, ""},
+		{"CertChainContext.Chains", Field, 0, ""},
+		{"CertChainContext.HasRevocationFreshnessTime", Field, 0, ""},
+		{"CertChainContext.LowerQualityChainCount", Field, 0, ""},
+		{"CertChainContext.LowerQualityChains", Field, 0, ""},
+		{"CertChainContext.RevocationFreshnessTime", Field, 0, ""},
+		{"CertChainContext.Size", Field, 0, ""},
+		{"CertChainContext.TrustStatus", Field, 0, ""},
+		{"CertChainElement", Type, 0, ""},
+		{"CertChainElement.ApplicationUsage", Field, 0, ""},
+		{"CertChainElement.CertContext", Field, 0, ""},
+		{"CertChainElement.ExtendedErrorInfo", Field, 0, ""},
+		{"CertChainElement.IssuanceUsage", Field, 0, ""},
+		{"CertChainElement.RevocationInfo", Field, 0, ""},
+		{"CertChainElement.Size", Field, 0, ""},
+		{"CertChainElement.TrustStatus", Field, 0, ""},
+		{"CertChainPara", Type, 0, ""},
+		{"CertChainPara.CacheResync", Field, 0, ""},
+		{"CertChainPara.CheckRevocationFreshnessTime", Field, 0, ""},
+		{"CertChainPara.RequestedUsage", Field, 0, ""},
+		{"CertChainPara.RequstedIssuancePolicy", Field, 0, ""},
+		{"CertChainPara.RevocationFreshnessTime", Field, 0, ""},
+		{"CertChainPara.Size", Field, 0, ""},
+		{"CertChainPara.URLRetrievalTimeout", Field, 0, ""},
+		{"CertChainPolicyPara", Type, 0, ""},
+		{"CertChainPolicyPara.ExtraPolicyPara", Field, 0, ""},
+		{"CertChainPolicyPara.Flags", Field, 0, ""},
+		{"CertChainPolicyPara.Size", Field, 0, ""},
+		{"CertChainPolicyStatus", Type, 0, ""},
+		{"CertChainPolicyStatus.ChainIndex", Field, 0, ""},
+		{"CertChainPolicyStatus.ElementIndex", Field, 0, ""},
+		{"CertChainPolicyStatus.Error", Field, 0, ""},
+		{"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0, ""},
+		{"CertChainPolicyStatus.Size", Field, 0, ""},
+		{"CertCloseStore", Func, 0, ""},
+		{"CertContext", Type, 0, ""},
+		{"CertContext.CertInfo", Field, 0, ""},
+		{"CertContext.EncodedCert", Field, 0, ""},
+		{"CertContext.EncodingType", Field, 0, ""},
+		{"CertContext.Length", Field, 0, ""},
+		{"CertContext.Store", Field, 0, ""},
+		{"CertCreateCertificateContext", Func, 0, ""},
+		{"CertEnhKeyUsage", Type, 0, ""},
+		{"CertEnhKeyUsage.Length", Field, 0, ""},
+		{"CertEnhKeyUsage.UsageIdentifiers", Field, 0, ""},
+		{"CertEnumCertificatesInStore", Func, 0, ""},
+		{"CertFreeCertificateChain", Func, 0, ""},
+		{"CertFreeCertificateContext", Func, 0, ""},
+		{"CertGetCertificateChain", Func, 0, ""},
+		{"CertInfo", Type, 11, ""},
+		{"CertOpenStore", Func, 0, ""},
+		{"CertOpenSystemStore", Func, 0, ""},
+		{"CertRevocationCrlInfo", Type, 11, ""},
+		{"CertRevocationInfo", Type, 0, ""},
+		{"CertRevocationInfo.CrlInfo", Field, 0, ""},
+		{"CertRevocationInfo.FreshnessTime", Field, 0, ""},
+		{"CertRevocationInfo.HasFreshnessTime", Field, 0, ""},
+		{"CertRevocationInfo.OidSpecificInfo", Field, 0, ""},
+		{"CertRevocationInfo.RevocationOid", Field, 0, ""},
+		{"CertRevocationInfo.RevocationResult", Field, 0, ""},
+		{"CertRevocationInfo.Size", Field, 0, ""},
+		{"CertSimpleChain", Type, 0, ""},
+		{"CertSimpleChain.Elements", Field, 0, ""},
+		{"CertSimpleChain.HasRevocationFreshnessTime", Field, 0, ""},
+		{"CertSimpleChain.NumElements", Field, 0, ""},
+		{"CertSimpleChain.RevocationFreshnessTime", Field, 0, ""},
+		{"CertSimpleChain.Size", Field, 0, ""},
+		{"CertSimpleChain.TrustListInfo", Field, 0, ""},
+		{"CertSimpleChain.TrustStatus", Field, 0, ""},
+		{"CertTrustListInfo", Type, 11, ""},
+		{"CertTrustStatus", Type, 0, ""},
+		{"CertTrustStatus.ErrorStatus", Field, 0, ""},
+		{"CertTrustStatus.InfoStatus", Field, 0, ""},
+		{"CertUsageMatch", Type, 0, ""},
+		{"CertUsageMatch.Type", Field, 0, ""},
+		{"CertUsageMatch.Usage", Field, 0, ""},
+		{"CertVerifyCertificateChainPolicy", Func, 0, ""},
+		{"Chdir", Func, 0, "func(path string) (err error)"},
+		{"CheckBpfVersion", Func, 0, ""},
+		{"Chflags", Func, 0, ""},
+		{"Chmod", Func, 0, "func(path string, mode uint32) (err error)"},
+		{"Chown", Func, 0, "func(path string, uid int, gid int) (err error)"},
+		{"Chroot", Func, 0, "func(path string) (err error)"},
+		{"Clearenv", Func, 0, "func()"},
+		{"Close", Func, 0, "func(fd int) (err error)"},
+		{"CloseHandle", Func, 0, ""},
+		{"CloseOnExec", Func, 0, "func(fd int)"},
+		{"Closesocket", Func, 0, ""},
+		{"CmsgLen", Func, 0, "func(datalen int) int"},
+		{"CmsgSpace", Func, 0, "func(datalen int) int"},
+		{"Cmsghdr", Type, 0, ""},
+		{"Cmsghdr.Len", Field, 0, ""},
+		{"Cmsghdr.Level", Field, 0, ""},
+		{"Cmsghdr.Type", Field, 0, ""},
+		{"Cmsghdr.X__cmsg_data", Field, 0, ""},
+		{"CommandLineToArgv", Func, 0, ""},
+		{"ComputerName", Func, 0, ""},
+		{"Conn", Type, 9, ""},
+		{"Connect", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
+		{"ConnectEx", Func, 1, ""},
+		{"ConvertSidToStringSid", Func, 0, ""},
+		{"ConvertStringSidToSid", Func, 0, ""},
+		{"CopySid", Func, 0, ""},
+		{"Creat", Func, 0, "func(path string, mode uint32) (fd int, err error)"},
+		{"CreateDirectory", Func, 0, ""},
+		{"CreateFile", Func, 0, ""},
+		{"CreateFileMapping", Func, 0, ""},
+		{"CreateHardLink", Func, 4, ""},
+		{"CreateIoCompletionPort", Func, 0, ""},
+		{"CreatePipe", Func, 0, ""},
+		{"CreateProcess", Func, 0, ""},
+		{"CreateProcessAsUser", Func, 10, ""},
+		{"CreateSymbolicLink", Func, 4, ""},
+		{"CreateToolhelp32Snapshot", Func, 4, ""},
+		{"Credential", Type, 0, ""},
+		{"Credential.Gid", Field, 0, ""},
+		{"Credential.Groups", Field, 0, ""},
+		{"Credential.NoSetGroups", Field, 9, ""},
+		{"Credential.Uid", Field, 0, ""},
+		{"CryptAcquireContext", Func, 0, ""},
+		{"CryptGenRandom", Func, 0, ""},
+		{"CryptReleaseContext", Func, 0, ""},
+		{"DIOCBSFLUSH", Const, 1, ""},
+		{"DIOCOSFPFLUSH", Const, 1, ""},
+		{"DLL", Type, 0, ""},
+		{"DLL.Handle", Field, 0, ""},
+		{"DLL.Name", Field, 0, ""},
+		{"DLLError", Type, 0, ""},
+		{"DLLError.Err", Field, 0, ""},
+		{"DLLError.Msg", Field, 0, ""},
+		{"DLLError.ObjName", Field, 0, ""},
+		{"DLT_A429", Const, 0, ""},
+		{"DLT_A653_ICM", Const, 0, ""},
+		{"DLT_AIRONET_HEADER", Const, 0, ""},
+		{"DLT_AOS", Const, 1, ""},
+		{"DLT_APPLE_IP_OVER_IEEE1394", Const, 0, ""},
+		{"DLT_ARCNET", Const, 0, ""},
+		{"DLT_ARCNET_LINUX", Const, 0, ""},
+		{"DLT_ATM_CLIP", Const, 0, ""},
+		{"DLT_ATM_RFC1483", Const, 0, ""},
+		{"DLT_AURORA", Const, 0, ""},
+		{"DLT_AX25", Const, 0, ""},
+		{"DLT_AX25_KISS", Const, 0, ""},
+		{"DLT_BACNET_MS_TP", Const, 0, ""},
+		{"DLT_BLUETOOTH_HCI_H4", Const, 0, ""},
+		{"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0, ""},
+		{"DLT_CAN20B", Const, 0, ""},
+		{"DLT_CAN_SOCKETCAN", Const, 1, ""},
+		{"DLT_CHAOS", Const, 0, ""},
+		{"DLT_CHDLC", Const, 0, ""},
+		{"DLT_CISCO_IOS", Const, 0, ""},
+		{"DLT_C_HDLC", Const, 0, ""},
+		{"DLT_C_HDLC_WITH_DIR", Const, 0, ""},
+		{"DLT_DBUS", Const, 1, ""},
+		{"DLT_DECT", Const, 1, ""},
+		{"DLT_DOCSIS", Const, 0, ""},
+		{"DLT_DVB_CI", Const, 1, ""},
+		{"DLT_ECONET", Const, 0, ""},
+		{"DLT_EN10MB", Const, 0, ""},
+		{"DLT_EN3MB", Const, 0, ""},
+		{"DLT_ENC", Const, 0, ""},
+		{"DLT_ERF", Const, 0, ""},
+		{"DLT_ERF_ETH", Const, 0, ""},
+		{"DLT_ERF_POS", Const, 0, ""},
+		{"DLT_FC_2", Const, 1, ""},
+		{"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1, ""},
+		{"DLT_FDDI", Const, 0, ""},
+		{"DLT_FLEXRAY", Const, 0, ""},
+		{"DLT_FRELAY", Const, 0, ""},
+		{"DLT_FRELAY_WITH_DIR", Const, 0, ""},
+		{"DLT_GCOM_SERIAL", Const, 0, ""},
+		{"DLT_GCOM_T1E1", Const, 0, ""},
+		{"DLT_GPF_F", Const, 0, ""},
+		{"DLT_GPF_T", Const, 0, ""},
+		{"DLT_GPRS_LLC", Const, 0, ""},
+		{"DLT_GSMTAP_ABIS", Const, 1, ""},
+		{"DLT_GSMTAP_UM", Const, 1, ""},
+		{"DLT_HDLC", Const, 1, ""},
+		{"DLT_HHDLC", Const, 0, ""},
+		{"DLT_HIPPI", Const, 1, ""},
+		{"DLT_IBM_SN", Const, 0, ""},
+		{"DLT_IBM_SP", Const, 0, ""},
+		{"DLT_IEEE802", Const, 0, ""},
+		{"DLT_IEEE802_11", Const, 0, ""},
+		{"DLT_IEEE802_11_RADIO", Const, 0, ""},
+		{"DLT_IEEE802_11_RADIO_AVS", Const, 0, ""},
+		{"DLT_IEEE802_15_4", Const, 0, ""},
+		{"DLT_IEEE802_15_4_LINUX", Const, 0, ""},
+		{"DLT_IEEE802_15_4_NOFCS", Const, 1, ""},
+		{"DLT_IEEE802_15_4_NONASK_PHY", Const, 0, ""},
+		{"DLT_IEEE802_16_MAC_CPS", Const, 0, ""},
+		{"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0, ""},
+		{"DLT_IPFILTER", Const, 0, ""},
+		{"DLT_IPMB", Const, 0, ""},
+		{"DLT_IPMB_LINUX", Const, 0, ""},
+		{"DLT_IPNET", Const, 1, ""},
+		{"DLT_IPOIB", Const, 1, ""},
+		{"DLT_IPV4", Const, 1, ""},
+		{"DLT_IPV6", Const, 1, ""},
+		{"DLT_IP_OVER_FC", Const, 0, ""},
+		{"DLT_JUNIPER_ATM1", Const, 0, ""},
+		{"DLT_JUNIPER_ATM2", Const, 0, ""},
+		{"DLT_JUNIPER_ATM_CEMIC", Const, 1, ""},
+		{"DLT_JUNIPER_CHDLC", Const, 0, ""},
+		{"DLT_JUNIPER_ES", Const, 0, ""},
+		{"DLT_JUNIPER_ETHER", Const, 0, ""},
+		{"DLT_JUNIPER_FIBRECHANNEL", Const, 1, ""},
+		{"DLT_JUNIPER_FRELAY", Const, 0, ""},
+		{"DLT_JUNIPER_GGSN", Const, 0, ""},
+		{"DLT_JUNIPER_ISM", Const, 0, ""},
+		{"DLT_JUNIPER_MFR", Const, 0, ""},
+		{"DLT_JUNIPER_MLFR", Const, 0, ""},
+		{"DLT_JUNIPER_MLPPP", Const, 0, ""},
+		{"DLT_JUNIPER_MONITOR", Const, 0, ""},
+		{"DLT_JUNIPER_PIC_PEER", Const, 0, ""},
+		{"DLT_JUNIPER_PPP", Const, 0, ""},
+		{"DLT_JUNIPER_PPPOE", Const, 0, ""},
+		{"DLT_JUNIPER_PPPOE_ATM", Const, 0, ""},
+		{"DLT_JUNIPER_SERVICES", Const, 0, ""},
+		{"DLT_JUNIPER_SRX_E2E", Const, 1, ""},
+		{"DLT_JUNIPER_ST", Const, 0, ""},
+		{"DLT_JUNIPER_VP", Const, 0, ""},
+		{"DLT_JUNIPER_VS", Const, 1, ""},
+		{"DLT_LAPB_WITH_DIR", Const, 0, ""},
+		{"DLT_LAPD", Const, 0, ""},
+		{"DLT_LIN", Const, 0, ""},
+		{"DLT_LINUX_EVDEV", Const, 1, ""},
+		{"DLT_LINUX_IRDA", Const, 0, ""},
+		{"DLT_LINUX_LAPD", Const, 0, ""},
+		{"DLT_LINUX_PPP_WITHDIRECTION", Const, 0, ""},
+		{"DLT_LINUX_SLL", Const, 0, ""},
+		{"DLT_LOOP", Const, 0, ""},
+		{"DLT_LTALK", Const, 0, ""},
+		{"DLT_MATCHING_MAX", Const, 1, ""},
+		{"DLT_MATCHING_MIN", Const, 1, ""},
+		{"DLT_MFR", Const, 0, ""},
+		{"DLT_MOST", Const, 0, ""},
+		{"DLT_MPEG_2_TS", Const, 1, ""},
+		{"DLT_MPLS", Const, 1, ""},
+		{"DLT_MTP2", Const, 0, ""},
+		{"DLT_MTP2_WITH_PHDR", Const, 0, ""},
+		{"DLT_MTP3", Const, 0, ""},
+		{"DLT_MUX27010", Const, 1, ""},
+		{"DLT_NETANALYZER", Const, 1, ""},
+		{"DLT_NETANALYZER_TRANSPARENT", Const, 1, ""},
+		{"DLT_NFC_LLCP", Const, 1, ""},
+		{"DLT_NFLOG", Const, 1, ""},
+		{"DLT_NG40", Const, 1, ""},
+		{"DLT_NULL", Const, 0, ""},
+		{"DLT_PCI_EXP", Const, 0, ""},
+		{"DLT_PFLOG", Const, 0, ""},
+		{"DLT_PFSYNC", Const, 0, ""},
+		{"DLT_PPI", Const, 0, ""},
+		{"DLT_PPP", Const, 0, ""},
+		{"DLT_PPP_BSDOS", Const, 0, ""},
+		{"DLT_PPP_ETHER", Const, 0, ""},
+		{"DLT_PPP_PPPD", Const, 0, ""},
+		{"DLT_PPP_SERIAL", Const, 0, ""},
+		{"DLT_PPP_WITH_DIR", Const, 0, ""},
+		{"DLT_PPP_WITH_DIRECTION", Const, 0, ""},
+		{"DLT_PRISM_HEADER", Const, 0, ""},
+		{"DLT_PRONET", Const, 0, ""},
+		{"DLT_RAIF1", Const, 0, ""},
+		{"DLT_RAW", Const, 0, ""},
+		{"DLT_RAWAF_MASK", Const, 1, ""},
+		{"DLT_RIO", Const, 0, ""},
+		{"DLT_SCCP", Const, 0, ""},
+		{"DLT_SITA", Const, 0, ""},
+		{"DLT_SLIP", Const, 0, ""},
+		{"DLT_SLIP_BSDOS", Const, 0, ""},
+		{"DLT_STANAG_5066_D_PDU", Const, 1, ""},
+		{"DLT_SUNATM", Const, 0, ""},
+		{"DLT_SYMANTEC_FIREWALL", Const, 0, ""},
+		{"DLT_TZSP", Const, 0, ""},
+		{"DLT_USB", Const, 0, ""},
+		{"DLT_USB_LINUX", Const, 0, ""},
+		{"DLT_USB_LINUX_MMAPPED", Const, 1, ""},
+		{"DLT_USER0", Const, 0, ""},
+		{"DLT_USER1", Const, 0, ""},
+		{"DLT_USER10", Const, 0, ""},
+		{"DLT_USER11", Const, 0, ""},
+		{"DLT_USER12", Const, 0, ""},
+		{"DLT_USER13", Const, 0, ""},
+		{"DLT_USER14", Const, 0, ""},
+		{"DLT_USER15", Const, 0, ""},
+		{"DLT_USER2", Const, 0, ""},
+		{"DLT_USER3", Const, 0, ""},
+		{"DLT_USER4", Const, 0, ""},
+		{"DLT_USER5", Const, 0, ""},
+		{"DLT_USER6", Const, 0, ""},
+		{"DLT_USER7", Const, 0, ""},
+		{"DLT_USER8", Const, 0, ""},
+		{"DLT_USER9", Const, 0, ""},
+		{"DLT_WIHART", Const, 1, ""},
+		{"DLT_X2E_SERIAL", Const, 0, ""},
+		{"DLT_X2E_XORAYA", Const, 0, ""},
+		{"DNSMXData", Type, 0, ""},
+		{"DNSMXData.NameExchange", Field, 0, ""},
+		{"DNSMXData.Pad", Field, 0, ""},
+		{"DNSMXData.Preference", Field, 0, ""},
+		{"DNSPTRData", Type, 0, ""},
+		{"DNSPTRData.Host", Field, 0, ""},
+		{"DNSRecord", Type, 0, ""},
+		{"DNSRecord.Data", Field, 0, ""},
+		{"DNSRecord.Dw", Field, 0, ""},
+		{"DNSRecord.Length", Field, 0, ""},
+		{"DNSRecord.Name", Field, 0, ""},
+		{"DNSRecord.Next", Field, 0, ""},
+		{"DNSRecord.Reserved", Field, 0, ""},
+		{"DNSRecord.Ttl", Field, 0, ""},
+		{"DNSRecord.Type", Field, 0, ""},
+		{"DNSSRVData", Type, 0, ""},
+		{"DNSSRVData.Pad", Field, 0, ""},
+		{"DNSSRVData.Port", Field, 0, ""},
+		{"DNSSRVData.Priority", Field, 0, ""},
+		{"DNSSRVData.Target", Field, 0, ""},
+		{"DNSSRVData.Weight", Field, 0, ""},
+		{"DNSTXTData", Type, 0, ""},
+		{"DNSTXTData.StringArray", Field, 0, ""},
+		{"DNSTXTData.StringCount", Field, 0, ""},
+		{"DNS_INFO_NO_RECORDS", Const, 4, ""},
+		{"DNS_TYPE_A", Const, 0, ""},
+		{"DNS_TYPE_A6", Const, 0, ""},
+		{"DNS_TYPE_AAAA", Const, 0, ""},
+		{"DNS_TYPE_ADDRS", Const, 0, ""},
+		{"DNS_TYPE_AFSDB", Const, 0, ""},
+		{"DNS_TYPE_ALL", Const, 0, ""},
+		{"DNS_TYPE_ANY", Const, 0, ""},
+		{"DNS_TYPE_ATMA", Const, 0, ""},
+		{"DNS_TYPE_AXFR", Const, 0, ""},
+		{"DNS_TYPE_CERT", Const, 0, ""},
+		{"DNS_TYPE_CNAME", Const, 0, ""},
+		{"DNS_TYPE_DHCID", Const, 0, ""},
+		{"DNS_TYPE_DNAME", Const, 0, ""},
+		{"DNS_TYPE_DNSKEY", Const, 0, ""},
+		{"DNS_TYPE_DS", Const, 0, ""},
+		{"DNS_TYPE_EID", Const, 0, ""},
+		{"DNS_TYPE_GID", Const, 0, ""},
+		{"DNS_TYPE_GPOS", Const, 0, ""},
+		{"DNS_TYPE_HINFO", Const, 0, ""},
+		{"DNS_TYPE_ISDN", Const, 0, ""},
+		{"DNS_TYPE_IXFR", Const, 0, ""},
+		{"DNS_TYPE_KEY", Const, 0, ""},
+		{"DNS_TYPE_KX", Const, 0, ""},
+		{"DNS_TYPE_LOC", Const, 0, ""},
+		{"DNS_TYPE_MAILA", Const, 0, ""},
+		{"DNS_TYPE_MAILB", Const, 0, ""},
+		{"DNS_TYPE_MB", Const, 0, ""},
+		{"DNS_TYPE_MD", Const, 0, ""},
+		{"DNS_TYPE_MF", Const, 0, ""},
+		{"DNS_TYPE_MG", Const, 0, ""},
+		{"DNS_TYPE_MINFO", Const, 0, ""},
+		{"DNS_TYPE_MR", Const, 0, ""},
+		{"DNS_TYPE_MX", Const, 0, ""},
+		{"DNS_TYPE_NAPTR", Const, 0, ""},
+		{"DNS_TYPE_NBSTAT", Const, 0, ""},
+		{"DNS_TYPE_NIMLOC", Const, 0, ""},
+		{"DNS_TYPE_NS", Const, 0, ""},
+		{"DNS_TYPE_NSAP", Const, 0, ""},
+		{"DNS_TYPE_NSAPPTR", Const, 0, ""},
+		{"DNS_TYPE_NSEC", Const, 0, ""},
+		{"DNS_TYPE_NULL", Const, 0, ""},
+		{"DNS_TYPE_NXT", Const, 0, ""},
+		{"DNS_TYPE_OPT", Const, 0, ""},
+		{"DNS_TYPE_PTR", Const, 0, ""},
+		{"DNS_TYPE_PX", Const, 0, ""},
+		{"DNS_TYPE_RP", Const, 0, ""},
+		{"DNS_TYPE_RRSIG", Const, 0, ""},
+		{"DNS_TYPE_RT", Const, 0, ""},
+		{"DNS_TYPE_SIG", Const, 0, ""},
+		{"DNS_TYPE_SINK", Const, 0, ""},
+		{"DNS_TYPE_SOA", Const, 0, ""},
+		{"DNS_TYPE_SRV", Const, 0, ""},
+		{"DNS_TYPE_TEXT", Const, 0, ""},
+		{"DNS_TYPE_TKEY", Const, 0, ""},
+		{"DNS_TYPE_TSIG", Const, 0, ""},
+		{"DNS_TYPE_UID", Const, 0, ""},
+		{"DNS_TYPE_UINFO", Const, 0, ""},
+		{"DNS_TYPE_UNSPEC", Const, 0, ""},
+		{"DNS_TYPE_WINS", Const, 0, ""},
+		{"DNS_TYPE_WINSR", Const, 0, ""},
+		{"DNS_TYPE_WKS", Const, 0, ""},
+		{"DNS_TYPE_X25", Const, 0, ""},
+		{"DT_BLK", Const, 0, ""},
+		{"DT_CHR", Const, 0, ""},
+		{"DT_DIR", Const, 0, ""},
+		{"DT_FIFO", Const, 0, ""},
+		{"DT_LNK", Const, 0, ""},
+		{"DT_REG", Const, 0, ""},
+		{"DT_SOCK", Const, 0, ""},
+		{"DT_UNKNOWN", Const, 0, ""},
+		{"DT_WHT", Const, 0, ""},
+		{"DUPLICATE_CLOSE_SOURCE", Const, 0, ""},
+		{"DUPLICATE_SAME_ACCESS", Const, 0, ""},
+		{"DeleteFile", Func, 0, ""},
+		{"DetachLsf", Func, 0, "func(fd int) error"},
+		{"DeviceIoControl", Func, 4, ""},
+		{"Dirent", Type, 0, ""},
+		{"Dirent.Fileno", Field, 0, ""},
+		{"Dirent.Ino", Field, 0, ""},
+		{"Dirent.Name", Field, 0, ""},
+		{"Dirent.Namlen", Field, 0, ""},
+		{"Dirent.Off", Field, 0, ""},
+		{"Dirent.Pad0", Field, 12, ""},
+		{"Dirent.Pad1", Field, 12, ""},
+		{"Dirent.Pad_cgo_0", Field, 0, ""},
+		{"Dirent.Reclen", Field, 0, ""},
+		{"Dirent.Seekoff", Field, 0, ""},
+		{"Dirent.Type", Field, 0, ""},
+		{"Dirent.X__d_padding", Field, 3, ""},
+		{"DnsNameCompare", Func, 4, ""},
+		{"DnsQuery", Func, 0, ""},
+		{"DnsRecordListFree", Func, 0, ""},
+		{"DnsSectionAdditional", Const, 4, ""},
+		{"DnsSectionAnswer", Const, 4, ""},
+		{"DnsSectionAuthority", Const, 4, ""},
+		{"DnsSectionQuestion", Const, 4, ""},
+		{"Dup", Func, 0, "func(oldfd int) (fd int, err error)"},
+		{"Dup2", Func, 0, "func(oldfd int, newfd int) (err error)"},
+		{"Dup3", Func, 2, "func(oldfd int, newfd int, flags int) (err error)"},
+		{"DuplicateHandle", Func, 0, ""},
+		{"E2BIG", Const, 0, ""},
+		{"EACCES", Const, 0, ""},
+		{"EADDRINUSE", Const, 0, ""},
+		{"EADDRNOTAVAIL", Const, 0, ""},
+		{"EADV", Const, 0, ""},
+		{"EAFNOSUPPORT", Const, 0, ""},
+		{"EAGAIN", Const, 0, ""},
+		{"EALREADY", Const, 0, ""},
+		{"EAUTH", Const, 0, ""},
+		{"EBADARCH", Const, 0, ""},
+		{"EBADE", Const, 0, ""},
+		{"EBADEXEC", Const, 0, ""},
+		{"EBADF", Const, 0, ""},
+		{"EBADFD", Const, 0, ""},
+		{"EBADMACHO", Const, 0, ""},
+		{"EBADMSG", Const, 0, ""},
+		{"EBADR", Const, 0, ""},
+		{"EBADRPC", Const, 0, ""},
+		{"EBADRQC", Const, 0, ""},
+		{"EBADSLT", Const, 0, ""},
+		{"EBFONT", Const, 0, ""},
+		{"EBUSY", Const, 0, ""},
+		{"ECANCELED", Const, 0, ""},
+		{"ECAPMODE", Const, 1, ""},
+		{"ECHILD", Const, 0, ""},
+		{"ECHO", Const, 0, ""},
+		{"ECHOCTL", Const, 0, ""},
+		{"ECHOE", Const, 0, ""},
+		{"ECHOK", Const, 0, ""},
+		{"ECHOKE", Const, 0, ""},
+		{"ECHONL", Const, 0, ""},
+		{"ECHOPRT", Const, 0, ""},
+		{"ECHRNG", Const, 0, ""},
+		{"ECOMM", Const, 0, ""},
+		{"ECONNABORTED", Const, 0, ""},
+		{"ECONNREFUSED", Const, 0, ""},
+		{"ECONNRESET", Const, 0, ""},
+		{"EDEADLK", Const, 0, ""},
+		{"EDEADLOCK", Const, 0, ""},
+		{"EDESTADDRREQ", Const, 0, ""},
+		{"EDEVERR", Const, 0, ""},
+		{"EDOM", Const, 0, ""},
+		{"EDOOFUS", Const, 0, ""},
+		{"EDOTDOT", Const, 0, ""},
+		{"EDQUOT", Const, 0, ""},
+		{"EEXIST", Const, 0, ""},
+		{"EFAULT", Const, 0, ""},
+		{"EFBIG", Const, 0, ""},
+		{"EFER_LMA", Const, 1, ""},
+		{"EFER_LME", Const, 1, ""},
+		{"EFER_NXE", Const, 1, ""},
+		{"EFER_SCE", Const, 1, ""},
+		{"EFTYPE", Const, 0, ""},
+		{"EHOSTDOWN", Const, 0, ""},
+		{"EHOSTUNREACH", Const, 0, ""},
+		{"EHWPOISON", Const, 0, ""},
+		{"EIDRM", Const, 0, ""},
+		{"EILSEQ", Const, 0, ""},
+		{"EINPROGRESS", Const, 0, ""},
+		{"EINTR", Const, 0, ""},
+		{"EINVAL", Const, 0, ""},
+		{"EIO", Const, 0, ""},
+		{"EIPSEC", Const, 1, ""},
+		{"EISCONN", Const, 0, ""},
+		{"EISDIR", Const, 0, ""},
+		{"EISNAM", Const, 0, ""},
+		{"EKEYEXPIRED", Const, 0, ""},
+		{"EKEYREJECTED", Const, 0, ""},
+		{"EKEYREVOKED", Const, 0, ""},
+		{"EL2HLT", Const, 0, ""},
+		{"EL2NSYNC", Const, 0, ""},
+		{"EL3HLT", Const, 0, ""},
+		{"EL3RST", Const, 0, ""},
+		{"ELAST", Const, 0, ""},
+		{"ELF_NGREG", Const, 0, ""},
+		{"ELF_PRARGSZ", Const, 0, ""},
+		{"ELIBACC", Const, 0, ""},
+		{"ELIBBAD", Const, 0, ""},
+		{"ELIBEXEC", Const, 0, ""},
+		{"ELIBMAX", Const, 0, ""},
+		{"ELIBSCN", Const, 0, ""},
+		{"ELNRNG", Const, 0, ""},
+		{"ELOOP", Const, 0, ""},
+		{"EMEDIUMTYPE", Const, 0, ""},
+		{"EMFILE", Const, 0, ""},
+		{"EMLINK", Const, 0, ""},
+		{"EMSGSIZE", Const, 0, ""},
+		{"EMT_TAGOVF", Const, 1, ""},
+		{"EMULTIHOP", Const, 0, ""},
+		{"EMUL_ENABLED", Const, 1, ""},
+		{"EMUL_LINUX", Const, 1, ""},
+		{"EMUL_LINUX32", Const, 1, ""},
+		{"EMUL_MAXID", Const, 1, ""},
+		{"EMUL_NATIVE", Const, 1, ""},
+		{"ENAMETOOLONG", Const, 0, ""},
+		{"ENAVAIL", Const, 0, ""},
+		{"ENDRUNDISC", Const, 1, ""},
+		{"ENEEDAUTH", Const, 0, ""},
+		{"ENETDOWN", Const, 0, ""},
+		{"ENETRESET", Const, 0, ""},
+		{"ENETUNREACH", Const, 0, ""},
+		{"ENFILE", Const, 0, ""},
+		{"ENOANO", Const, 0, ""},
+		{"ENOATTR", Const, 0, ""},
+		{"ENOBUFS", Const, 0, ""},
+		{"ENOCSI", Const, 0, ""},
+		{"ENODATA", Const, 0, ""},
+		{"ENODEV", Const, 0, ""},
+		{"ENOENT", Const, 0, ""},
+		{"ENOEXEC", Const, 0, ""},
+		{"ENOKEY", Const, 0, ""},
+		{"ENOLCK", Const, 0, ""},
+		{"ENOLINK", Const, 0, ""},
+		{"ENOMEDIUM", Const, 0, ""},
+		{"ENOMEM", Const, 0, ""},
+		{"ENOMSG", Const, 0, ""},
+		{"ENONET", Const, 0, ""},
+		{"ENOPKG", Const, 0, ""},
+		{"ENOPOLICY", Const, 0, ""},
+		{"ENOPROTOOPT", Const, 0, ""},
+		{"ENOSPC", Const, 0, ""},
+		{"ENOSR", Const, 0, ""},
+		{"ENOSTR", Const, 0, ""},
+		{"ENOSYS", Const, 0, ""},
+		{"ENOTBLK", Const, 0, ""},
+		{"ENOTCAPABLE", Const, 0, ""},
+		{"ENOTCONN", Const, 0, ""},
+		{"ENOTDIR", Const, 0, ""},
+		{"ENOTEMPTY", Const, 0, ""},
+		{"ENOTNAM", Const, 0, ""},
+		{"ENOTRECOVERABLE", Const, 0, ""},
+		{"ENOTSOCK", Const, 0, ""},
+		{"ENOTSUP", Const, 0, ""},
+		{"ENOTTY", Const, 0, ""},
+		{"ENOTUNIQ", Const, 0, ""},
+		{"ENXIO", Const, 0, ""},
+		{"EN_SW_CTL_INF", Const, 1, ""},
+		{"EN_SW_CTL_PREC", Const, 1, ""},
+		{"EN_SW_CTL_ROUND", Const, 1, ""},
+		{"EN_SW_DATACHAIN", Const, 1, ""},
+		{"EN_SW_DENORM", Const, 1, ""},
+		{"EN_SW_INVOP", Const, 1, ""},
+		{"EN_SW_OVERFLOW", Const, 1, ""},
+		{"EN_SW_PRECLOSS", Const, 1, ""},
+		{"EN_SW_UNDERFLOW", Const, 1, ""},
+		{"EN_SW_ZERODIV", Const, 1, ""},
+		{"EOPNOTSUPP", Const, 0, ""},
+		{"EOVERFLOW", Const, 0, ""},
+		{"EOWNERDEAD", Const, 0, ""},
+		{"EPERM", Const, 0, ""},
+		{"EPFNOSUPPORT", Const, 0, ""},
+		{"EPIPE", Const, 0, ""},
+		{"EPOLLERR", Const, 0, ""},
+		{"EPOLLET", Const, 0, ""},
+		{"EPOLLHUP", Const, 0, ""},
+		{"EPOLLIN", Const, 0, ""},
+		{"EPOLLMSG", Const, 0, ""},
+		{"EPOLLONESHOT", Const, 0, ""},
+		{"EPOLLOUT", Const, 0, ""},
+		{"EPOLLPRI", Const, 0, ""},
+		{"EPOLLRDBAND", Const, 0, ""},
+		{"EPOLLRDHUP", Const, 0, ""},
+		{"EPOLLRDNORM", Const, 0, ""},
+		{"EPOLLWRBAND", Const, 0, ""},
+		{"EPOLLWRNORM", Const, 0, ""},
+		{"EPOLL_CLOEXEC", Const, 0, ""},
+		{"EPOLL_CTL_ADD", Const, 0, ""},
+		{"EPOLL_CTL_DEL", Const, 0, ""},
+		{"EPOLL_CTL_MOD", Const, 0, ""},
+		{"EPOLL_NONBLOCK", Const, 0, ""},
+		{"EPROCLIM", Const, 0, ""},
+		{"EPROCUNAVAIL", Const, 0, ""},
+		{"EPROGMISMATCH", Const, 0, ""},
+		{"EPROGUNAVAIL", Const, 0, ""},
+		{"EPROTO", Const, 0, ""},
+		{"EPROTONOSUPPORT", Const, 0, ""},
+		{"EPROTOTYPE", Const, 0, ""},
+		{"EPWROFF", Const, 0, ""},
+		{"EQFULL", Const, 16, ""},
+		{"ERANGE", Const, 0, ""},
+		{"EREMCHG", Const, 0, ""},
+		{"EREMOTE", Const, 0, ""},
+		{"EREMOTEIO", Const, 0, ""},
+		{"ERESTART", Const, 0, ""},
+		{"ERFKILL", Const, 0, ""},
+		{"EROFS", Const, 0, ""},
+		{"ERPCMISMATCH", Const, 0, ""},
+		{"ERROR_ACCESS_DENIED", Const, 0, ""},
+		{"ERROR_ALREADY_EXISTS", Const, 0, ""},
+		{"ERROR_BROKEN_PIPE", Const, 0, ""},
+		{"ERROR_BUFFER_OVERFLOW", Const, 0, ""},
+		{"ERROR_DIR_NOT_EMPTY", Const, 8, ""},
+		{"ERROR_ENVVAR_NOT_FOUND", Const, 0, ""},
+		{"ERROR_FILE_EXISTS", Const, 0, ""},
+		{"ERROR_FILE_NOT_FOUND", Const, 0, ""},
+		{"ERROR_HANDLE_EOF", Const, 2, ""},
+		{"ERROR_INSUFFICIENT_BUFFER", Const, 0, ""},
+		{"ERROR_IO_PENDING", Const, 0, ""},
+		{"ERROR_MOD_NOT_FOUND", Const, 0, ""},
+		{"ERROR_MORE_DATA", Const, 3, ""},
+		{"ERROR_NETNAME_DELETED", Const, 3, ""},
+		{"ERROR_NOT_FOUND", Const, 1, ""},
+		{"ERROR_NO_MORE_FILES", Const, 0, ""},
+		{"ERROR_OPERATION_ABORTED", Const, 0, ""},
+		{"ERROR_PATH_NOT_FOUND", Const, 0, ""},
+		{"ERROR_PRIVILEGE_NOT_HELD", Const, 4, ""},
+		{"ERROR_PROC_NOT_FOUND", Const, 0, ""},
+		{"ESHLIBVERS", Const, 0, ""},
+		{"ESHUTDOWN", Const, 0, ""},
+		{"ESOCKTNOSUPPORT", Const, 0, ""},
+		{"ESPIPE", Const, 0, ""},
+		{"ESRCH", Const, 0, ""},
+		{"ESRMNT", Const, 0, ""},
+		{"ESTALE", Const, 0, ""},
+		{"ESTRPIPE", Const, 0, ""},
+		{"ETHERCAP_JUMBO_MTU", Const, 1, ""},
+		{"ETHERCAP_VLAN_HWTAGGING", Const, 1, ""},
+		{"ETHERCAP_VLAN_MTU", Const, 1, ""},
+		{"ETHERMIN", Const, 1, ""},
+		{"ETHERMTU", Const, 1, ""},
+		{"ETHERMTU_JUMBO", Const, 1, ""},
+		{"ETHERTYPE_8023", Const, 1, ""},
+		{"ETHERTYPE_AARP", Const, 1, ""},
+		{"ETHERTYPE_ACCTON", Const, 1, ""},
+		{"ETHERTYPE_AEONIC", Const, 1, ""},
+		{"ETHERTYPE_ALPHA", Const, 1, ""},
+		{"ETHERTYPE_AMBER", Const, 1, ""},
+		{"ETHERTYPE_AMOEBA", Const, 1, ""},
+		{"ETHERTYPE_AOE", Const, 1, ""},
+		{"ETHERTYPE_APOLLO", Const, 1, ""},
+		{"ETHERTYPE_APOLLODOMAIN", Const, 1, ""},
+		{"ETHERTYPE_APPLETALK", Const, 1, ""},
+		{"ETHERTYPE_APPLITEK", Const, 1, ""},
+		{"ETHERTYPE_ARGONAUT", Const, 1, ""},
+		{"ETHERTYPE_ARP", Const, 1, ""},
+		{"ETHERTYPE_AT", Const, 1, ""},
+		{"ETHERTYPE_ATALK", Const, 1, ""},
+		{"ETHERTYPE_ATOMIC", Const, 1, ""},
+		{"ETHERTYPE_ATT", Const, 1, ""},
+		{"ETHERTYPE_ATTSTANFORD", Const, 1, ""},
+		{"ETHERTYPE_AUTOPHON", Const, 1, ""},
+		{"ETHERTYPE_AXIS", Const, 1, ""},
+		{"ETHERTYPE_BCLOOP", Const, 1, ""},
+		{"ETHERTYPE_BOFL", Const, 1, ""},
+		{"ETHERTYPE_CABLETRON", Const, 1, ""},
+		{"ETHERTYPE_CHAOS", Const, 1, ""},
+		{"ETHERTYPE_COMDESIGN", Const, 1, ""},
+		{"ETHERTYPE_COMPUGRAPHIC", Const, 1, ""},
+		{"ETHERTYPE_COUNTERPOINT", Const, 1, ""},
+		{"ETHERTYPE_CRONUS", Const, 1, ""},
+		{"ETHERTYPE_CRONUSVLN", Const, 1, ""},
+		{"ETHERTYPE_DCA", Const, 1, ""},
+		{"ETHERTYPE_DDE", Const, 1, ""},
+		{"ETHERTYPE_DEBNI", Const, 1, ""},
+		{"ETHERTYPE_DECAM", Const, 1, ""},
+		{"ETHERTYPE_DECCUST", Const, 1, ""},
+		{"ETHERTYPE_DECDIAG", Const, 1, ""},
+		{"ETHERTYPE_DECDNS", Const, 1, ""},
+		{"ETHERTYPE_DECDTS", Const, 1, ""},
+		{"ETHERTYPE_DECEXPER", Const, 1, ""},
+		{"ETHERTYPE_DECLAST", Const, 1, ""},
+		{"ETHERTYPE_DECLTM", Const, 1, ""},
+		{"ETHERTYPE_DECMUMPS", Const, 1, ""},
+		{"ETHERTYPE_DECNETBIOS", Const, 1, ""},
+		{"ETHERTYPE_DELTACON", Const, 1, ""},
+		{"ETHERTYPE_DIDDLE", Const, 1, ""},
+		{"ETHERTYPE_DLOG1", Const, 1, ""},
+		{"ETHERTYPE_DLOG2", Const, 1, ""},
+		{"ETHERTYPE_DN", Const, 1, ""},
+		{"ETHERTYPE_DOGFIGHT", Const, 1, ""},
+		{"ETHERTYPE_DSMD", Const, 1, ""},
+		{"ETHERTYPE_ECMA", Const, 1, ""},
+		{"ETHERTYPE_ENCRYPT", Const, 1, ""},
+		{"ETHERTYPE_ES", Const, 1, ""},
+		{"ETHERTYPE_EXCELAN", Const, 1, ""},
+		{"ETHERTYPE_EXPERDATA", Const, 1, ""},
+		{"ETHERTYPE_FLIP", Const, 1, ""},
+		{"ETHERTYPE_FLOWCONTROL", Const, 1, ""},
+		{"ETHERTYPE_FRARP", Const, 1, ""},
+		{"ETHERTYPE_GENDYN", Const, 1, ""},
+		{"ETHERTYPE_HAYES", Const, 1, ""},
+		{"ETHERTYPE_HIPPI_FP", Const, 1, ""},
+		{"ETHERTYPE_HITACHI", Const, 1, ""},
+		{"ETHERTYPE_HP", Const, 1, ""},
+		{"ETHERTYPE_IEEEPUP", Const, 1, ""},
+		{"ETHERTYPE_IEEEPUPAT", Const, 1, ""},
+		{"ETHERTYPE_IMLBL", Const, 1, ""},
+		{"ETHERTYPE_IMLBLDIAG", Const, 1, ""},
+		{"ETHERTYPE_IP", Const, 1, ""},
+		{"ETHERTYPE_IPAS", Const, 1, ""},
+		{"ETHERTYPE_IPV6", Const, 1, ""},
+		{"ETHERTYPE_IPX", Const, 1, ""},
+		{"ETHERTYPE_IPXNEW", Const, 1, ""},
+		{"ETHERTYPE_KALPANA", Const, 1, ""},
+		{"ETHERTYPE_LANBRIDGE", Const, 1, ""},
+		{"ETHERTYPE_LANPROBE", Const, 1, ""},
+		{"ETHERTYPE_LAT", Const, 1, ""},
+		{"ETHERTYPE_LBACK", Const, 1, ""},
+		{"ETHERTYPE_LITTLE", Const, 1, ""},
+		{"ETHERTYPE_LLDP", Const, 1, ""},
+		{"ETHERTYPE_LOGICRAFT", Const, 1, ""},
+		{"ETHERTYPE_LOOPBACK", Const, 1, ""},
+		{"ETHERTYPE_MATRA", Const, 1, ""},
+		{"ETHERTYPE_MAX", Const, 1, ""},
+		{"ETHERTYPE_MERIT", Const, 1, ""},
+		{"ETHERTYPE_MICP", Const, 1, ""},
+		{"ETHERTYPE_MOPDL", Const, 1, ""},
+		{"ETHERTYPE_MOPRC", Const, 1, ""},
+		{"ETHERTYPE_MOTOROLA", Const, 1, ""},
+		{"ETHERTYPE_MPLS", Const, 1, ""},
+		{"ETHERTYPE_MPLS_MCAST", Const, 1, ""},
+		{"ETHERTYPE_MUMPS", Const, 1, ""},
+		{"ETHERTYPE_NBPCC", Const, 1, ""},
+		{"ETHERTYPE_NBPCLAIM", Const, 1, ""},
+		{"ETHERTYPE_NBPCLREQ", Const, 1, ""},
+		{"ETHERTYPE_NBPCLRSP", Const, 1, ""},
+		{"ETHERTYPE_NBPCREQ", Const, 1, ""},
+		{"ETHERTYPE_NBPCRSP", Const, 1, ""},
+		{"ETHERTYPE_NBPDG", Const, 1, ""},
+		{"ETHERTYPE_NBPDGB", Const, 1, ""},
+		{"ETHERTYPE_NBPDLTE", Const, 1, ""},
+		{"ETHERTYPE_NBPRAR", Const, 1, ""},
+		{"ETHERTYPE_NBPRAS", Const, 1, ""},
+		{"ETHERTYPE_NBPRST", Const, 1, ""},
+		{"ETHERTYPE_NBPSCD", Const, 1, ""},
+		{"ETHERTYPE_NBPVCD", Const, 1, ""},
+		{"ETHERTYPE_NBS", Const, 1, ""},
+		{"ETHERTYPE_NCD", Const, 1, ""},
+		{"ETHERTYPE_NESTAR", Const, 1, ""},
+		{"ETHERTYPE_NETBEUI", Const, 1, ""},
+		{"ETHERTYPE_NOVELL", Const, 1, ""},
+		{"ETHERTYPE_NS", Const, 1, ""},
+		{"ETHERTYPE_NSAT", Const, 1, ""},
+		{"ETHERTYPE_NSCOMPAT", Const, 1, ""},
+		{"ETHERTYPE_NTRAILER", Const, 1, ""},
+		{"ETHERTYPE_OS9", Const, 1, ""},
+		{"ETHERTYPE_OS9NET", Const, 1, ""},
+		{"ETHERTYPE_PACER", Const, 1, ""},
+		{"ETHERTYPE_PAE", Const, 1, ""},
+		{"ETHERTYPE_PCS", Const, 1, ""},
+		{"ETHERTYPE_PLANNING", Const, 1, ""},
+		{"ETHERTYPE_PPP", Const, 1, ""},
+		{"ETHERTYPE_PPPOE", Const, 1, ""},
+		{"ETHERTYPE_PPPOEDISC", Const, 1, ""},
+		{"ETHERTYPE_PRIMENTS", Const, 1, ""},
+		{"ETHERTYPE_PUP", Const, 1, ""},
+		{"ETHERTYPE_PUPAT", Const, 1, ""},
+		{"ETHERTYPE_QINQ", Const, 1, ""},
+		{"ETHERTYPE_RACAL", Const, 1, ""},
+		{"ETHERTYPE_RATIONAL", Const, 1, ""},
+		{"ETHERTYPE_RAWFR", Const, 1, ""},
+		{"ETHERTYPE_RCL", Const, 1, ""},
+		{"ETHERTYPE_RDP", Const, 1, ""},
+		{"ETHERTYPE_RETIX", Const, 1, ""},
+		{"ETHERTYPE_REVARP", Const, 1, ""},
+		{"ETHERTYPE_SCA", Const, 1, ""},
+		{"ETHERTYPE_SECTRA", Const, 1, ""},
+		{"ETHERTYPE_SECUREDATA", Const, 1, ""},
+		{"ETHERTYPE_SGITW", Const, 1, ""},
+		{"ETHERTYPE_SG_BOUNCE", Const, 1, ""},
+		{"ETHERTYPE_SG_DIAG", Const, 1, ""},
+		{"ETHERTYPE_SG_NETGAMES", Const, 1, ""},
+		{"ETHERTYPE_SG_RESV", Const, 1, ""},
+		{"ETHERTYPE_SIMNET", Const, 1, ""},
+		{"ETHERTYPE_SLOW", Const, 1, ""},
+		{"ETHERTYPE_SLOWPROTOCOLS", Const, 1, ""},
+		{"ETHERTYPE_SNA", Const, 1, ""},
+		{"ETHERTYPE_SNMP", Const, 1, ""},
+		{"ETHERTYPE_SONIX", Const, 1, ""},
+		{"ETHERTYPE_SPIDER", Const, 1, ""},
+		{"ETHERTYPE_SPRITE", Const, 1, ""},
+		{"ETHERTYPE_STP", Const, 1, ""},
+		{"ETHERTYPE_TALARIS", Const, 1, ""},
+		{"ETHERTYPE_TALARISMC", Const, 1, ""},
+		{"ETHERTYPE_TCPCOMP", Const, 1, ""},
+		{"ETHERTYPE_TCPSM", Const, 1, ""},
+		{"ETHERTYPE_TEC", Const, 1, ""},
+		{"ETHERTYPE_TIGAN", Const, 1, ""},
+		{"ETHERTYPE_TRAIL", Const, 1, ""},
+		{"ETHERTYPE_TRANSETHER", Const, 1, ""},
+		{"ETHERTYPE_TYMSHARE", Const, 1, ""},
+		{"ETHERTYPE_UBBST", Const, 1, ""},
+		{"ETHERTYPE_UBDEBUG", Const, 1, ""},
+		{"ETHERTYPE_UBDIAGLOOP", Const, 1, ""},
+		{"ETHERTYPE_UBDL", Const, 1, ""},
+		{"ETHERTYPE_UBNIU", Const, 1, ""},
+		{"ETHERTYPE_UBNMC", Const, 1, ""},
+		{"ETHERTYPE_VALID", Const, 1, ""},
+		{"ETHERTYPE_VARIAN", Const, 1, ""},
+		{"ETHERTYPE_VAXELN", Const, 1, ""},
+		{"ETHERTYPE_VEECO", Const, 1, ""},
+		{"ETHERTYPE_VEXP", Const, 1, ""},
+		{"ETHERTYPE_VGLAB", Const, 1, ""},
+		{"ETHERTYPE_VINES", Const, 1, ""},
+		{"ETHERTYPE_VINESECHO", Const, 1, ""},
+		{"ETHERTYPE_VINESLOOP", Const, 1, ""},
+		{"ETHERTYPE_VITAL", Const, 1, ""},
+		{"ETHERTYPE_VLAN", Const, 1, ""},
+		{"ETHERTYPE_VLTLMAN", Const, 1, ""},
+		{"ETHERTYPE_VPROD", Const, 1, ""},
+		{"ETHERTYPE_VURESERVED", Const, 1, ""},
+		{"ETHERTYPE_WATERLOO", Const, 1, ""},
+		{"ETHERTYPE_WELLFLEET", Const, 1, ""},
+		{"ETHERTYPE_X25", Const, 1, ""},
+		{"ETHERTYPE_X75", Const, 1, ""},
+		{"ETHERTYPE_XNSSM", Const, 1, ""},
+		{"ETHERTYPE_XTP", Const, 1, ""},
+		{"ETHER_ADDR_LEN", Const, 1, ""},
+		{"ETHER_ALIGN", Const, 1, ""},
+		{"ETHER_CRC_LEN", Const, 1, ""},
+		{"ETHER_CRC_POLY_BE", Const, 1, ""},
+		{"ETHER_CRC_POLY_LE", Const, 1, ""},
+		{"ETHER_HDR_LEN", Const, 1, ""},
+		{"ETHER_MAX_DIX_LEN", Const, 1, ""},
+		{"ETHER_MAX_LEN", Const, 1, ""},
+		{"ETHER_MAX_LEN_JUMBO", Const, 1, ""},
+		{"ETHER_MIN_LEN", Const, 1, ""},
+		{"ETHER_PPPOE_ENCAP_LEN", Const, 1, ""},
+		{"ETHER_TYPE_LEN", Const, 1, ""},
+		{"ETHER_VLAN_ENCAP_LEN", Const, 1, ""},
+		{"ETH_P_1588", Const, 0, ""},
+		{"ETH_P_8021Q", Const, 0, ""},
+		{"ETH_P_802_2", Const, 0, ""},
+		{"ETH_P_802_3", Const, 0, ""},
+		{"ETH_P_AARP", Const, 0, ""},
+		{"ETH_P_ALL", Const, 0, ""},
+		{"ETH_P_AOE", Const, 0, ""},
+		{"ETH_P_ARCNET", Const, 0, ""},
+		{"ETH_P_ARP", Const, 0, ""},
+		{"ETH_P_ATALK", Const, 0, ""},
+		{"ETH_P_ATMFATE", Const, 0, ""},
+		{"ETH_P_ATMMPOA", Const, 0, ""},
+		{"ETH_P_AX25", Const, 0, ""},
+		{"ETH_P_BPQ", Const, 0, ""},
+		{"ETH_P_CAIF", Const, 0, ""},
+		{"ETH_P_CAN", Const, 0, ""},
+		{"ETH_P_CONTROL", Const, 0, ""},
+		{"ETH_P_CUST", Const, 0, ""},
+		{"ETH_P_DDCMP", Const, 0, ""},
+		{"ETH_P_DEC", Const, 0, ""},
+		{"ETH_P_DIAG", Const, 0, ""},
+		{"ETH_P_DNA_DL", Const, 0, ""},
+		{"ETH_P_DNA_RC", Const, 0, ""},
+		{"ETH_P_DNA_RT", Const, 0, ""},
+		{"ETH_P_DSA", Const, 0, ""},
+		{"ETH_P_ECONET", Const, 0, ""},
+		{"ETH_P_EDSA", Const, 0, ""},
+		{"ETH_P_FCOE", Const, 0, ""},
+		{"ETH_P_FIP", Const, 0, ""},
+		{"ETH_P_HDLC", Const, 0, ""},
+		{"ETH_P_IEEE802154", Const, 0, ""},
+		{"ETH_P_IEEEPUP", Const, 0, ""},
+		{"ETH_P_IEEEPUPAT", Const, 0, ""},
+		{"ETH_P_IP", Const, 0, ""},
+		{"ETH_P_IPV6", Const, 0, ""},
+		{"ETH_P_IPX", Const, 0, ""},
+		{"ETH_P_IRDA", Const, 0, ""},
+		{"ETH_P_LAT", Const, 0, ""},
+		{"ETH_P_LINK_CTL", Const, 0, ""},
+		{"ETH_P_LOCALTALK", Const, 0, ""},
+		{"ETH_P_LOOP", Const, 0, ""},
+		{"ETH_P_MOBITEX", Const, 0, ""},
+		{"ETH_P_MPLS_MC", Const, 0, ""},
+		{"ETH_P_MPLS_UC", Const, 0, ""},
+		{"ETH_P_PAE", Const, 0, ""},
+		{"ETH_P_PAUSE", Const, 0, ""},
+		{"ETH_P_PHONET", Const, 0, ""},
+		{"ETH_P_PPPTALK", Const, 0, ""},
+		{"ETH_P_PPP_DISC", Const, 0, ""},
+		{"ETH_P_PPP_MP", Const, 0, ""},
+		{"ETH_P_PPP_SES", Const, 0, ""},
+		{"ETH_P_PUP", Const, 0, ""},
+		{"ETH_P_PUPAT", Const, 0, ""},
+		{"ETH_P_RARP", Const, 0, ""},
+		{"ETH_P_SCA", Const, 0, ""},
+		{"ETH_P_SLOW", Const, 0, ""},
+		{"ETH_P_SNAP", Const, 0, ""},
+		{"ETH_P_TEB", Const, 0, ""},
+		{"ETH_P_TIPC", Const, 0, ""},
+		{"ETH_P_TRAILER", Const, 0, ""},
+		{"ETH_P_TR_802_2", Const, 0, ""},
+		{"ETH_P_WAN_PPP", Const, 0, ""},
+		{"ETH_P_WCCP", Const, 0, ""},
+		{"ETH_P_X25", Const, 0, ""},
+		{"ETIME", Const, 0, ""},
+		{"ETIMEDOUT", Const, 0, ""},
+		{"ETOOMANYREFS", Const, 0, ""},
+		{"ETXTBSY", Const, 0, ""},
+		{"EUCLEAN", Const, 0, ""},
+		{"EUNATCH", Const, 0, ""},
+		{"EUSERS", Const, 0, ""},
+		{"EVFILT_AIO", Const, 0, ""},
+		{"EVFILT_FS", Const, 0, ""},
+		{"EVFILT_LIO", Const, 0, ""},
+		{"EVFILT_MACHPORT", Const, 0, ""},
+		{"EVFILT_PROC", Const, 0, ""},
+		{"EVFILT_READ", Const, 0, ""},
+		{"EVFILT_SIGNAL", Const, 0, ""},
+		{"EVFILT_SYSCOUNT", Const, 0, ""},
+		{"EVFILT_THREADMARKER", Const, 0, ""},
+		{"EVFILT_TIMER", Const, 0, ""},
+		{"EVFILT_USER", Const, 0, ""},
+		{"EVFILT_VM", Const, 0, ""},
+		{"EVFILT_VNODE", Const, 0, ""},
+		{"EVFILT_WRITE", Const, 0, ""},
+		{"EV_ADD", Const, 0, ""},
+		{"EV_CLEAR", Const, 0, ""},
+		{"EV_DELETE", Const, 0, ""},
+		{"EV_DISABLE", Const, 0, ""},
+		{"EV_DISPATCH", Const, 0, ""},
+		{"EV_DROP", Const, 3, ""},
+		{"EV_ENABLE", Const, 0, ""},
+		{"EV_EOF", Const, 0, ""},
+		{"EV_ERROR", Const, 0, ""},
+		{"EV_FLAG0", Const, 0, ""},
+		{"EV_FLAG1", Const, 0, ""},
+		{"EV_ONESHOT", Const, 0, ""},
+		{"EV_OOBAND", Const, 0, ""},
+		{"EV_POLL", Const, 0, ""},
+		{"EV_RECEIPT", Const, 0, ""},
+		{"EV_SYSFLAGS", Const, 0, ""},
+		{"EWINDOWS", Const, 0, ""},
+		{"EWOULDBLOCK", Const, 0, ""},
+		{"EXDEV", Const, 0, ""},
+		{"EXFULL", Const, 0, ""},
+		{"EXTA", Const, 0, ""},
+		{"EXTB", Const, 0, ""},
+		{"EXTPROC", Const, 0, ""},
+		{"Environ", Func, 0, "func() []string"},
+		{"EpollCreate", Func, 0, "func(size int) (fd int, err error)"},
+		{"EpollCreate1", Func, 0, "func(flag int) (fd int, err error)"},
+		{"EpollCtl", Func, 0, "func(epfd int, op int, fd int, event *EpollEvent) (err error)"},
+		{"EpollEvent", Type, 0, ""},
+		{"EpollEvent.Events", Field, 0, ""},
+		{"EpollEvent.Fd", Field, 0, ""},
+		{"EpollEvent.Pad", Field, 0, ""},
+		{"EpollEvent.PadFd", Field, 0, ""},
+		{"EpollWait", Func, 0, "func(epfd int, events []EpollEvent, msec int) (n int, err error)"},
+		{"Errno", Type, 0, ""},
+		{"EscapeArg", Func, 0, ""},
+		{"Exchangedata", Func, 0, ""},
+		{"Exec", Func, 0, "func(argv0 string, argv []string, envv []string) (err error)"},
+		{"Exit", Func, 0, "func(code int)"},
+		{"ExitProcess", Func, 0, ""},
+		{"FD_CLOEXEC", Const, 0, ""},
+		{"FD_SETSIZE", Const, 0, ""},
+		{"FILE_ACTION_ADDED", Const, 0, ""},
+		{"FILE_ACTION_MODIFIED", Const, 0, ""},
+		{"FILE_ACTION_REMOVED", Const, 0, ""},
+		{"FILE_ACTION_RENAMED_NEW_NAME", Const, 0, ""},
+		{"FILE_ACTION_RENAMED_OLD_NAME", Const, 0, ""},
+		{"FILE_APPEND_DATA", Const, 0, ""},
+		{"FILE_ATTRIBUTE_ARCHIVE", Const, 0, ""},
+		{"FILE_ATTRIBUTE_DIRECTORY", Const, 0, ""},
+		{"FILE_ATTRIBUTE_HIDDEN", Const, 0, ""},
+		{"FILE_ATTRIBUTE_NORMAL", Const, 0, ""},
+		{"FILE_ATTRIBUTE_READONLY", Const, 0, ""},
+		{"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4, ""},
+		{"FILE_ATTRIBUTE_SYSTEM", Const, 0, ""},
+		{"FILE_BEGIN", Const, 0, ""},
+		{"FILE_CURRENT", Const, 0, ""},
+		{"FILE_END", Const, 0, ""},
+		{"FILE_FLAG_BACKUP_SEMANTICS", Const, 0, ""},
+		{"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4, ""},
+		{"FILE_FLAG_OVERLAPPED", Const, 0, ""},
+		{"FILE_LIST_DIRECTORY", Const, 0, ""},
+		{"FILE_MAP_COPY", Const, 0, ""},
+		{"FILE_MAP_EXECUTE", Const, 0, ""},
+		{"FILE_MAP_READ", Const, 0, ""},
+		{"FILE_MAP_WRITE", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_CREATION", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0, ""},
+		{"FILE_NOTIFY_CHANGE_SIZE", Const, 0, ""},
+		{"FILE_SHARE_DELETE", Const, 0, ""},
+		{"FILE_SHARE_READ", Const, 0, ""},
+		{"FILE_SHARE_WRITE", Const, 0, ""},
+		{"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2, ""},
+		{"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2, ""},
+		{"FILE_TYPE_CHAR", Const, 0, ""},
+		{"FILE_TYPE_DISK", Const, 0, ""},
+		{"FILE_TYPE_PIPE", Const, 0, ""},
+		{"FILE_TYPE_REMOTE", Const, 0, ""},
+		{"FILE_TYPE_UNKNOWN", Const, 0, ""},
+		{"FILE_WRITE_ATTRIBUTES", Const, 0, ""},
+		{"FLUSHO", Const, 0, ""},
+		{"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0, ""},
+		{"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0, ""},
+		{"FORMAT_MESSAGE_FROM_HMODULE", Const, 0, ""},
+		{"FORMAT_MESSAGE_FROM_STRING", Const, 0, ""},
+		{"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0, ""},
+		{"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0, ""},
+		{"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0, ""},
+		{"FSCTL_GET_REPARSE_POINT", Const, 4, ""},
+		{"F_ADDFILESIGS", Const, 0, ""},
+		{"F_ADDSIGS", Const, 0, ""},
+		{"F_ALLOCATEALL", Const, 0, ""},
+		{"F_ALLOCATECONTIG", Const, 0, ""},
+		{"F_CANCEL", Const, 0, ""},
+		{"F_CHKCLEAN", Const, 0, ""},
+		{"F_CLOSEM", Const, 1, ""},
+		{"F_DUP2FD", Const, 0, ""},
+		{"F_DUP2FD_CLOEXEC", Const, 1, ""},
+		{"F_DUPFD", Const, 0, ""},
+		{"F_DUPFD_CLOEXEC", Const, 0, ""},
+		{"F_EXLCK", Const, 0, ""},
+		{"F_FINDSIGS", Const, 16, ""},
+		{"F_FLUSH_DATA", Const, 0, ""},
+		{"F_FREEZE_FS", Const, 0, ""},
+		{"F_FSCTL", Const, 1, ""},
+		{"F_FSDIRMASK", Const, 1, ""},
+		{"F_FSIN", Const, 1, ""},
+		{"F_FSINOUT", Const, 1, ""},
+		{"F_FSOUT", Const, 1, ""},
+		{"F_FSPRIV", Const, 1, ""},
+		{"F_FSVOID", Const, 1, ""},
+		{"F_FULLFSYNC", Const, 0, ""},
+		{"F_GETCODEDIR", Const, 16, ""},
+		{"F_GETFD", Const, 0, ""},
+		{"F_GETFL", Const, 0, ""},
+		{"F_GETLEASE", Const, 0, ""},
+		{"F_GETLK", Const, 0, ""},
+		{"F_GETLK64", Const, 0, ""},
+		{"F_GETLKPID", Const, 0, ""},
+		{"F_GETNOSIGPIPE", Const, 0, ""},
+		{"F_GETOWN", Const, 0, ""},
+		{"F_GETOWN_EX", Const, 0, ""},
+		{"F_GETPATH", Const, 0, ""},
+		{"F_GETPATH_MTMINFO", Const, 0, ""},
+		{"F_GETPIPE_SZ", Const, 0, ""},
+		{"F_GETPROTECTIONCLASS", Const, 0, ""},
+		{"F_GETPROTECTIONLEVEL", Const, 16, ""},
+		{"F_GETSIG", Const, 0, ""},
+		{"F_GLOBAL_NOCACHE", Const, 0, ""},
+		{"F_LOCK", Const, 0, ""},
+		{"F_LOG2PHYS", Const, 0, ""},
+		{"F_LOG2PHYS_EXT", Const, 0, ""},
+		{"F_MARKDEPENDENCY", Const, 0, ""},
+		{"F_MAXFD", Const, 1, ""},
+		{"F_NOCACHE", Const, 0, ""},
+		{"F_NODIRECT", Const, 0, ""},
+		{"F_NOTIFY", Const, 0, ""},
+		{"F_OGETLK", Const, 0, ""},
+		{"F_OK", Const, 0, ""},
+		{"F_OSETLK", Const, 0, ""},
+		{"F_OSETLKW", Const, 0, ""},
+		{"F_PARAM_MASK", Const, 1, ""},
+		{"F_PARAM_MAX", Const, 1, ""},
+		{"F_PATHPKG_CHECK", Const, 0, ""},
+		{"F_PEOFPOSMODE", Const, 0, ""},
+		{"F_PREALLOCATE", Const, 0, ""},
+		{"F_RDADVISE", Const, 0, ""},
+		{"F_RDAHEAD", Const, 0, ""},
+		{"F_RDLCK", Const, 0, ""},
+		{"F_READAHEAD", Const, 0, ""},
+		{"F_READBOOTSTRAP", Const, 0, ""},
+		{"F_SETBACKINGSTORE", Const, 0, ""},
+		{"F_SETFD", Const, 0, ""},
+		{"F_SETFL", Const, 0, ""},
+		{"F_SETLEASE", Const, 0, ""},
+		{"F_SETLK", Const, 0, ""},
+		{"F_SETLK64", Const, 0, ""},
+		{"F_SETLKW", Const, 0, ""},
+		{"F_SETLKW64", Const, 0, ""},
+		{"F_SETLKWTIMEOUT", Const, 16, ""},
+		{"F_SETLK_REMOTE", Const, 0, ""},
+		{"F_SETNOSIGPIPE", Const, 0, ""},
+		{"F_SETOWN", Const, 0, ""},
+		{"F_SETOWN_EX", Const, 0, ""},
+		{"F_SETPIPE_SZ", Const, 0, ""},
+		{"F_SETPROTECTIONCLASS", Const, 0, ""},
+		{"F_SETSIG", Const, 0, ""},
+		{"F_SETSIZE", Const, 0, ""},
+		{"F_SHLCK", Const, 0, ""},
+		{"F_SINGLE_WRITER", Const, 16, ""},
+		{"F_TEST", Const, 0, ""},
+		{"F_THAW_FS", Const, 0, ""},
+		{"F_TLOCK", Const, 0, ""},
+		{"F_TRANSCODEKEY", Const, 16, ""},
+		{"F_ULOCK", Const, 0, ""},
+		{"F_UNLCK", Const, 0, ""},
+		{"F_UNLCKSYS", Const, 0, ""},
+		{"F_VOLPOSMODE", Const, 0, ""},
+		{"F_WRITEBOOTSTRAP", Const, 0, ""},
+		{"F_WRLCK", Const, 0, ""},
+		{"Faccessat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) (err error)"},
+		{"Fallocate", Func, 0, "func(fd int, mode uint32, off int64, len int64) (err error)"},
+		{"Fbootstraptransfer_t", Type, 0, ""},
+		{"Fbootstraptransfer_t.Buffer", Field, 0, ""},
+		{"Fbootstraptransfer_t.Length", Field, 0, ""},
+		{"Fbootstraptransfer_t.Offset", Field, 0, ""},
+		{"Fchdir", Func, 0, "func(fd int) (err error)"},
+		{"Fchflags", Func, 0, ""},
+		{"Fchmod", Func, 0, "func(fd int, mode uint32) (err error)"},
+		{"Fchmodat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) error"},
+		{"Fchown", Func, 0, "func(fd int, uid int, gid int) (err error)"},
+		{"Fchownat", Func, 0, "func(dirfd int, path string, uid int, gid int, flags int) (err error)"},
+		{"FcntlFlock", Func, 3, "func(fd uintptr, cmd int, lk *Flock_t) error"},
+		{"FdSet", Type, 0, ""},
+		{"FdSet.Bits", Field, 0, ""},
+		{"FdSet.X__fds_bits", Field, 0, ""},
+		{"Fdatasync", Func, 0, "func(fd int) (err error)"},
+		{"FileNotifyInformation", Type, 0, ""},
+		{"FileNotifyInformation.Action", Field, 0, ""},
+		{"FileNotifyInformation.FileName", Field, 0, ""},
+		{"FileNotifyInformation.FileNameLength", Field, 0, ""},
+		{"FileNotifyInformation.NextEntryOffset", Field, 0, ""},
+		{"Filetime", Type, 0, ""},
+		{"Filetime.HighDateTime", Field, 0, ""},
+		{"Filetime.LowDateTime", Field, 0, ""},
+		{"FindClose", Func, 0, ""},
+		{"FindFirstFile", Func, 0, ""},
+		{"FindNextFile", Func, 0, ""},
+		{"Flock", Func, 0, "func(fd int, how int) (err error)"},
+		{"Flock_t", Type, 0, ""},
+		{"Flock_t.Len", Field, 0, ""},
+		{"Flock_t.Pad_cgo_0", Field, 0, ""},
+		{"Flock_t.Pad_cgo_1", Field, 3, ""},
+		{"Flock_t.Pid", Field, 0, ""},
+		{"Flock_t.Start", Field, 0, ""},
+		{"Flock_t.Sysid", Field, 0, ""},
+		{"Flock_t.Type", Field, 0, ""},
+		{"Flock_t.Whence", Field, 0, ""},
+		{"FlushBpf", Func, 0, ""},
+		{"FlushFileBuffers", Func, 0, ""},
+		{"FlushViewOfFile", Func, 0, ""},
+		{"ForkExec", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)"},
+		{"ForkLock", Var, 0, ""},
+		{"FormatMessage", Func, 0, ""},
+		{"Fpathconf", Func, 0, ""},
+		{"FreeAddrInfoW", Func, 1, ""},
+		{"FreeEnvironmentStrings", Func, 0, ""},
+		{"FreeLibrary", Func, 0, ""},
+		{"Fsid", Type, 0, ""},
+		{"Fsid.Val", Field, 0, ""},
+		{"Fsid.X__fsid_val", Field, 2, ""},
+		{"Fsid.X__val", Field, 0, ""},
+		{"Fstat", Func, 0, "func(fd int, stat *Stat_t) (err error)"},
+		{"Fstatat", Func, 12, ""},
+		{"Fstatfs", Func, 0, "func(fd int, buf *Statfs_t) (err error)"},
+		{"Fstore_t", Type, 0, ""},
+		{"Fstore_t.Bytesalloc", Field, 0, ""},
+		{"Fstore_t.Flags", Field, 0, ""},
+		{"Fstore_t.Length", Field, 0, ""},
+		{"Fstore_t.Offset", Field, 0, ""},
+		{"Fstore_t.Posmode", Field, 0, ""},
+		{"Fsync", Func, 0, "func(fd int) (err error)"},
+		{"Ftruncate", Func, 0, "func(fd int, length int64) (err error)"},
+		{"FullPath", Func, 4, ""},
+		{"Futimes", Func, 0, "func(fd int, tv []Timeval) (err error)"},
+		{"Futimesat", Func, 0, "func(dirfd int, path string, tv []Timeval) (err error)"},
+		{"GENERIC_ALL", Const, 0, ""},
+		{"GENERIC_EXECUTE", Const, 0, ""},
+		{"GENERIC_READ", Const, 0, ""},
+		{"GENERIC_WRITE", Const, 0, ""},
+		{"GUID", Type, 1, ""},
+		{"GUID.Data1", Field, 1, ""},
+		{"GUID.Data2", Field, 1, ""},
+		{"GUID.Data3", Field, 1, ""},
+		{"GUID.Data4", Field, 1, ""},
+		{"GetAcceptExSockaddrs", Func, 0, ""},
+		{"GetAdaptersInfo", Func, 0, ""},
+		{"GetAddrInfoW", Func, 1, ""},
+		{"GetCommandLine", Func, 0, ""},
+		{"GetComputerName", Func, 0, ""},
+		{"GetConsoleMode", Func, 1, ""},
+		{"GetCurrentDirectory", Func, 0, ""},
+		{"GetCurrentProcess", Func, 0, ""},
+		{"GetEnvironmentStrings", Func, 0, ""},
+		{"GetEnvironmentVariable", Func, 0, ""},
+		{"GetExitCodeProcess", Func, 0, ""},
+		{"GetFileAttributes", Func, 0, ""},
+		{"GetFileAttributesEx", Func, 0, ""},
+		{"GetFileExInfoStandard", Const, 0, ""},
+		{"GetFileExMaxInfoLevel", Const, 0, ""},
+		{"GetFileInformationByHandle", Func, 0, ""},
+		{"GetFileType", Func, 0, ""},
+		{"GetFullPathName", Func, 0, ""},
+		{"GetHostByName", Func, 0, ""},
+		{"GetIfEntry", Func, 0, ""},
+		{"GetLastError", Func, 0, ""},
+		{"GetLengthSid", Func, 0, ""},
+		{"GetLongPathName", Func, 0, ""},
+		{"GetProcAddress", Func, 0, ""},
+		{"GetProcessTimes", Func, 0, ""},
+		{"GetProtoByName", Func, 0, ""},
+		{"GetQueuedCompletionStatus", Func, 0, ""},
+		{"GetServByName", Func, 0, ""},
+		{"GetShortPathName", Func, 0, ""},
+		{"GetStartupInfo", Func, 0, ""},
+		{"GetStdHandle", Func, 0, ""},
+		{"GetSystemTimeAsFileTime", Func, 0, ""},
+		{"GetTempPath", Func, 0, ""},
+		{"GetTimeZoneInformation", Func, 0, ""},
+		{"GetTokenInformation", Func, 0, ""},
+		{"GetUserNameEx", Func, 0, ""},
+		{"GetUserProfileDirectory", Func, 0, ""},
+		{"GetVersion", Func, 0, ""},
+		{"Getcwd", Func, 0, "func(buf []byte) (n int, err error)"},
+		{"Getdents", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
+		{"Getdirentries", Func, 0, ""},
+		{"Getdtablesize", Func, 0, ""},
+		{"Getegid", Func, 0, "func() (egid int)"},
+		{"Getenv", Func, 0, "func(key string) (value string, found bool)"},
+		{"Geteuid", Func, 0, "func() (euid int)"},
+		{"Getfsstat", Func, 0, ""},
+		{"Getgid", Func, 0, "func() (gid int)"},
+		{"Getgroups", Func, 0, "func() (gids []int, err error)"},
+		{"Getpagesize", Func, 0, "func() int"},
+		{"Getpeername", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
+		{"Getpgid", Func, 0, "func(pid int) (pgid int, err error)"},
+		{"Getpgrp", Func, 0, "func() (pid int)"},
+		{"Getpid", Func, 0, "func() (pid int)"},
+		{"Getppid", Func, 0, "func() (ppid int)"},
+		{"Getpriority", Func, 0, "func(which int, who int) (prio int, err error)"},
+		{"Getrlimit", Func, 0, "func(resource int, rlim *Rlimit) (err error)"},
+		{"Getrusage", Func, 0, "func(who int, rusage *Rusage) (err error)"},
+		{"Getsid", Func, 0, ""},
+		{"Getsockname", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
+		{"Getsockopt", Func, 1, ""},
+		{"GetsockoptByte", Func, 0, ""},
+		{"GetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int) (*ICMPv6Filter, error)"},
+		{"GetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int) (*IPMreq, error)"},
+		{"GetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int) (*IPMreqn, error)"},
+		{"GetsockoptIPv6MTUInfo", Func, 2, "func(fd int, level int, opt int) (*IPv6MTUInfo, error)"},
+		{"GetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int) (*IPv6Mreq, error)"},
+		{"GetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int) (value [4]byte, err error)"},
+		{"GetsockoptInt", Func, 0, "func(fd int, level int, opt int) (value int, err error)"},
+		{"GetsockoptUcred", Func, 1, "func(fd int, level int, opt int) (*Ucred, error)"},
+		{"Gettid", Func, 0, "func() (tid int)"},
+		{"Gettimeofday", Func, 0, "func(tv *Timeval) (err error)"},
+		{"Getuid", Func, 0, "func() (uid int)"},
+		{"Getwd", Func, 0, "func() (wd string, err error)"},
+		{"Getxattr", Func, 1, "func(path string, attr string, dest []byte) (sz int, err error)"},
+		{"HANDLE_FLAG_INHERIT", Const, 0, ""},
+		{"HKEY_CLASSES_ROOT", Const, 0, ""},
+		{"HKEY_CURRENT_CONFIG", Const, 0, ""},
+		{"HKEY_CURRENT_USER", Const, 0, ""},
+		{"HKEY_DYN_DATA", Const, 0, ""},
+		{"HKEY_LOCAL_MACHINE", Const, 0, ""},
+		{"HKEY_PERFORMANCE_DATA", Const, 0, ""},
+		{"HKEY_USERS", Const, 0, ""},
+		{"HUPCL", Const, 0, ""},
+		{"Handle", Type, 0, ""},
+		{"Hostent", Type, 0, ""},
+		{"Hostent.AddrList", Field, 0, ""},
+		{"Hostent.AddrType", Field, 0, ""},
+		{"Hostent.Aliases", Field, 0, ""},
+		{"Hostent.Length", Field, 0, ""},
+		{"Hostent.Name", Field, 0, ""},
+		{"ICANON", Const, 0, ""},
+		{"ICMP6_FILTER", Const, 2, ""},
+		{"ICMPV6_FILTER", Const, 2, ""},
+		{"ICMPv6Filter", Type, 2, ""},
+		{"ICMPv6Filter.Data", Field, 2, ""},
+		{"ICMPv6Filter.Filt", Field, 2, ""},
+		{"ICRNL", Const, 0, ""},
+		{"IEXTEN", Const, 0, ""},
+		{"IFAN_ARRIVAL", Const, 1, ""},
+		{"IFAN_DEPARTURE", Const, 1, ""},
+		{"IFA_ADDRESS", Const, 0, ""},
+		{"IFA_ANYCAST", Const, 0, ""},
+		{"IFA_BROADCAST", Const, 0, ""},
+		{"IFA_CACHEINFO", Const, 0, ""},
+		{"IFA_F_DADFAILED", Const, 0, ""},
+		{"IFA_F_DEPRECATED", Const, 0, ""},
+		{"IFA_F_HOMEADDRESS", Const, 0, ""},
+		{"IFA_F_NODAD", Const, 0, ""},
+		{"IFA_F_OPTIMISTIC", Const, 0, ""},
+		{"IFA_F_PERMANENT", Const, 0, ""},
+		{"IFA_F_SECONDARY", Const, 0, ""},
+		{"IFA_F_TEMPORARY", Const, 0, ""},
+		{"IFA_F_TENTATIVE", Const, 0, ""},
+		{"IFA_LABEL", Const, 0, ""},
+		{"IFA_LOCAL", Const, 0, ""},
+		{"IFA_MAX", Const, 0, ""},
+		{"IFA_MULTICAST", Const, 0, ""},
+		{"IFA_ROUTE", Const, 1, ""},
+		{"IFA_UNSPEC", Const, 0, ""},
+		{"IFF_ALLMULTI", Const, 0, ""},
+		{"IFF_ALTPHYS", Const, 0, ""},
+		{"IFF_AUTOMEDIA", Const, 0, ""},
+		{"IFF_BROADCAST", Const, 0, ""},
+		{"IFF_CANTCHANGE", Const, 0, ""},
+		{"IFF_CANTCONFIG", Const, 1, ""},
+		{"IFF_DEBUG", Const, 0, ""},
+		{"IFF_DRV_OACTIVE", Const, 0, ""},
+		{"IFF_DRV_RUNNING", Const, 0, ""},
+		{"IFF_DYING", Const, 0, ""},
+		{"IFF_DYNAMIC", Const, 0, ""},
+		{"IFF_LINK0", Const, 0, ""},
+		{"IFF_LINK1", Const, 0, ""},
+		{"IFF_LINK2", Const, 0, ""},
+		{"IFF_LOOPBACK", Const, 0, ""},
+		{"IFF_MASTER", Const, 0, ""},
+		{"IFF_MONITOR", Const, 0, ""},
+		{"IFF_MULTICAST", Const, 0, ""},
+		{"IFF_NOARP", Const, 0, ""},
+		{"IFF_NOTRAILERS", Const, 0, ""},
+		{"IFF_NO_PI", Const, 0, ""},
+		{"IFF_OACTIVE", Const, 0, ""},
+		{"IFF_ONE_QUEUE", Const, 0, ""},
+		{"IFF_POINTOPOINT", Const, 0, ""},
+		{"IFF_POINTTOPOINT", Const, 0, ""},
+		{"IFF_PORTSEL", Const, 0, ""},
+		{"IFF_PPROMISC", Const, 0, ""},
+		{"IFF_PROMISC", Const, 0, ""},
+		{"IFF_RENAMING", Const, 0, ""},
+		{"IFF_RUNNING", Const, 0, ""},
+		{"IFF_SIMPLEX", Const, 0, ""},
+		{"IFF_SLAVE", Const, 0, ""},
+		{"IFF_SMART", Const, 0, ""},
+		{"IFF_STATICARP", Const, 0, ""},
+		{"IFF_TAP", Const, 0, ""},
+		{"IFF_TUN", Const, 0, ""},
+		{"IFF_TUN_EXCL", Const, 0, ""},
+		{"IFF_UP", Const, 0, ""},
+		{"IFF_VNET_HDR", Const, 0, ""},
+		{"IFLA_ADDRESS", Const, 0, ""},
+		{"IFLA_BROADCAST", Const, 0, ""},
+		{"IFLA_COST", Const, 0, ""},
+		{"IFLA_IFALIAS", Const, 0, ""},
+		{"IFLA_IFNAME", Const, 0, ""},
+		{"IFLA_LINK", Const, 0, ""},
+		{"IFLA_LINKINFO", Const, 0, ""},
+		{"IFLA_LINKMODE", Const, 0, ""},
+		{"IFLA_MAP", Const, 0, ""},
+		{"IFLA_MASTER", Const, 0, ""},
+		{"IFLA_MAX", Const, 0, ""},
+		{"IFLA_MTU", Const, 0, ""},
+		{"IFLA_NET_NS_PID", Const, 0, ""},
+		{"IFLA_OPERSTATE", Const, 0, ""},
+		{"IFLA_PRIORITY", Const, 0, ""},
+		{"IFLA_PROTINFO", Const, 0, ""},
+		{"IFLA_QDISC", Const, 0, ""},
+		{"IFLA_STATS", Const, 0, ""},
+		{"IFLA_TXQLEN", Const, 0, ""},
+		{"IFLA_UNSPEC", Const, 0, ""},
+		{"IFLA_WEIGHT", Const, 0, ""},
+		{"IFLA_WIRELESS", Const, 0, ""},
+		{"IFNAMSIZ", Const, 0, ""},
+		{"IFT_1822", Const, 0, ""},
+		{"IFT_A12MPPSWITCH", Const, 0, ""},
+		{"IFT_AAL2", Const, 0, ""},
+		{"IFT_AAL5", Const, 0, ""},
+		{"IFT_ADSL", Const, 0, ""},
+		{"IFT_AFLANE8023", Const, 0, ""},
+		{"IFT_AFLANE8025", Const, 0, ""},
+		{"IFT_ARAP", Const, 0, ""},
+		{"IFT_ARCNET", Const, 0, ""},
+		{"IFT_ARCNETPLUS", Const, 0, ""},
+		{"IFT_ASYNC", Const, 0, ""},
+		{"IFT_ATM", Const, 0, ""},
+		{"IFT_ATMDXI", Const, 0, ""},
+		{"IFT_ATMFUNI", Const, 0, ""},
+		{"IFT_ATMIMA", Const, 0, ""},
+		{"IFT_ATMLOGICAL", Const, 0, ""},
+		{"IFT_ATMRADIO", Const, 0, ""},
+		{"IFT_ATMSUBINTERFACE", Const, 0, ""},
+		{"IFT_ATMVCIENDPT", Const, 0, ""},
+		{"IFT_ATMVIRTUAL", Const, 0, ""},
+		{"IFT_BGPPOLICYACCOUNTING", Const, 0, ""},
+		{"IFT_BLUETOOTH", Const, 1, ""},
+		{"IFT_BRIDGE", Const, 0, ""},
+		{"IFT_BSC", Const, 0, ""},
+		{"IFT_CARP", Const, 0, ""},
+		{"IFT_CCTEMUL", Const, 0, ""},
+		{"IFT_CELLULAR", Const, 0, ""},
+		{"IFT_CEPT", Const, 0, ""},
+		{"IFT_CES", Const, 0, ""},
+		{"IFT_CHANNEL", Const, 0, ""},
+		{"IFT_CNR", Const, 0, ""},
+		{"IFT_COFFEE", Const, 0, ""},
+		{"IFT_COMPOSITELINK", Const, 0, ""},
+		{"IFT_DCN", Const, 0, ""},
+		{"IFT_DIGITALPOWERLINE", Const, 0, ""},
+		{"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0, ""},
+		{"IFT_DLSW", Const, 0, ""},
+		{"IFT_DOCSCABLEDOWNSTREAM", Const, 0, ""},
+		{"IFT_DOCSCABLEMACLAYER", Const, 0, ""},
+		{"IFT_DOCSCABLEUPSTREAM", Const, 0, ""},
+		{"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1, ""},
+		{"IFT_DS0", Const, 0, ""},
+		{"IFT_DS0BUNDLE", Const, 0, ""},
+		{"IFT_DS1FDL", Const, 0, ""},
+		{"IFT_DS3", Const, 0, ""},
+		{"IFT_DTM", Const, 0, ""},
+		{"IFT_DUMMY", Const, 1, ""},
+		{"IFT_DVBASILN", Const, 0, ""},
+		{"IFT_DVBASIOUT", Const, 0, ""},
+		{"IFT_DVBRCCDOWNSTREAM", Const, 0, ""},
+		{"IFT_DVBRCCMACLAYER", Const, 0, ""},
+		{"IFT_DVBRCCUPSTREAM", Const, 0, ""},
+		{"IFT_ECONET", Const, 1, ""},
+		{"IFT_ENC", Const, 0, ""},
+		{"IFT_EON", Const, 0, ""},
+		{"IFT_EPLRS", Const, 0, ""},
+		{"IFT_ESCON", Const, 0, ""},
+		{"IFT_ETHER", Const, 0, ""},
+		{"IFT_FAITH", Const, 0, ""},
+		{"IFT_FAST", Const, 0, ""},
+		{"IFT_FASTETHER", Const, 0, ""},
+		{"IFT_FASTETHERFX", Const, 0, ""},
+		{"IFT_FDDI", Const, 0, ""},
+		{"IFT_FIBRECHANNEL", Const, 0, ""},
+		{"IFT_FRAMERELAYINTERCONNECT", Const, 0, ""},
+		{"IFT_FRAMERELAYMPI", Const, 0, ""},
+		{"IFT_FRDLCIENDPT", Const, 0, ""},
+		{"IFT_FRELAY", Const, 0, ""},
+		{"IFT_FRELAYDCE", Const, 0, ""},
+		{"IFT_FRF16MFRBUNDLE", Const, 0, ""},
+		{"IFT_FRFORWARD", Const, 0, ""},
+		{"IFT_G703AT2MB", Const, 0, ""},
+		{"IFT_G703AT64K", Const, 0, ""},
+		{"IFT_GIF", Const, 0, ""},
+		{"IFT_GIGABITETHERNET", Const, 0, ""},
+		{"IFT_GR303IDT", Const, 0, ""},
+		{"IFT_GR303RDT", Const, 0, ""},
+		{"IFT_H323GATEKEEPER", Const, 0, ""},
+		{"IFT_H323PROXY", Const, 0, ""},
+		{"IFT_HDH1822", Const, 0, ""},
+		{"IFT_HDLC", Const, 0, ""},
+		{"IFT_HDSL2", Const, 0, ""},
+		{"IFT_HIPERLAN2", Const, 0, ""},
+		{"IFT_HIPPI", Const, 0, ""},
+		{"IFT_HIPPIINTERFACE", Const, 0, ""},
+		{"IFT_HOSTPAD", Const, 0, ""},
+		{"IFT_HSSI", Const, 0, ""},
+		{"IFT_HY", Const, 0, ""},
+		{"IFT_IBM370PARCHAN", Const, 0, ""},
+		{"IFT_IDSL", Const, 0, ""},
+		{"IFT_IEEE1394", Const, 0, ""},
+		{"IFT_IEEE80211", Const, 0, ""},
+		{"IFT_IEEE80212", Const, 0, ""},
+		{"IFT_IEEE8023ADLAG", Const, 0, ""},
+		{"IFT_IFGSN", Const, 0, ""},
+		{"IFT_IMT", Const, 0, ""},
+		{"IFT_INFINIBAND", Const, 1, ""},
+		{"IFT_INTERLEAVE", Const, 0, ""},
+		{"IFT_IP", Const, 0, ""},
+		{"IFT_IPFORWARD", Const, 0, ""},
+		{"IFT_IPOVERATM", Const, 0, ""},
+		{"IFT_IPOVERCDLC", Const, 0, ""},
+		{"IFT_IPOVERCLAW", Const, 0, ""},
+		{"IFT_IPSWITCH", Const, 0, ""},
+		{"IFT_IPXIP", Const, 0, ""},
+		{"IFT_ISDN", Const, 0, ""},
+		{"IFT_ISDNBASIC", Const, 0, ""},
+		{"IFT_ISDNPRIMARY", Const, 0, ""},
+		{"IFT_ISDNS", Const, 0, ""},
+		{"IFT_ISDNU", Const, 0, ""},
+		{"IFT_ISO88022LLC", Const, 0, ""},
+		{"IFT_ISO88023", Const, 0, ""},
+		{"IFT_ISO88024", Const, 0, ""},
+		{"IFT_ISO88025", Const, 0, ""},
+		{"IFT_ISO88025CRFPINT", Const, 0, ""},
+		{"IFT_ISO88025DTR", Const, 0, ""},
+		{"IFT_ISO88025FIBER", Const, 0, ""},
+		{"IFT_ISO88026", Const, 0, ""},
+		{"IFT_ISUP", Const, 0, ""},
+		{"IFT_L2VLAN", Const, 0, ""},
+		{"IFT_L3IPVLAN", Const, 0, ""},
+		{"IFT_L3IPXVLAN", Const, 0, ""},
+		{"IFT_LAPB", Const, 0, ""},
+		{"IFT_LAPD", Const, 0, ""},
+		{"IFT_LAPF", Const, 0, ""},
+		{"IFT_LINEGROUP", Const, 1, ""},
+		{"IFT_LOCALTALK", Const, 0, ""},
+		{"IFT_LOOP", Const, 0, ""},
+		{"IFT_MEDIAMAILOVERIP", Const, 0, ""},
+		{"IFT_MFSIGLINK", Const, 0, ""},
+		{"IFT_MIOX25", Const, 0, ""},
+		{"IFT_MODEM", Const, 0, ""},
+		{"IFT_MPC", Const, 0, ""},
+		{"IFT_MPLS", Const, 0, ""},
+		{"IFT_MPLSTUNNEL", Const, 0, ""},
+		{"IFT_MSDSL", Const, 0, ""},
+		{"IFT_MVL", Const, 0, ""},
+		{"IFT_MYRINET", Const, 0, ""},
+		{"IFT_NFAS", Const, 0, ""},
+		{"IFT_NSIP", Const, 0, ""},
+		{"IFT_OPTICALCHANNEL", Const, 0, ""},
+		{"IFT_OPTICALTRANSPORT", Const, 0, ""},
+		{"IFT_OTHER", Const, 0, ""},
+		{"IFT_P10", Const, 0, ""},
+		{"IFT_P80", Const, 0, ""},
+		{"IFT_PARA", Const, 0, ""},
+		{"IFT_PDP", Const, 0, ""},
+		{"IFT_PFLOG", Const, 0, ""},
+		{"IFT_PFLOW", Const, 1, ""},
+		{"IFT_PFSYNC", Const, 0, ""},
+		{"IFT_PLC", Const, 0, ""},
+		{"IFT_PON155", Const, 1, ""},
+		{"IFT_PON622", Const, 1, ""},
+		{"IFT_POS", Const, 0, ""},
+		{"IFT_PPP", Const, 0, ""},
+		{"IFT_PPPMULTILINKBUNDLE", Const, 0, ""},
+		{"IFT_PROPATM", Const, 1, ""},
+		{"IFT_PROPBWAP2MP", Const, 0, ""},
+		{"IFT_PROPCNLS", Const, 0, ""},
+		{"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0, ""},
+		{"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0, ""},
+		{"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0, ""},
+		{"IFT_PROPMUX", Const, 0, ""},
+		{"IFT_PROPVIRTUAL", Const, 0, ""},
+		{"IFT_PROPWIRELESSP2P", Const, 0, ""},
+		{"IFT_PTPSERIAL", Const, 0, ""},
+		{"IFT_PVC", Const, 0, ""},
+		{"IFT_Q2931", Const, 1, ""},
+		{"IFT_QLLC", Const, 0, ""},
+		{"IFT_RADIOMAC", Const, 0, ""},
+		{"IFT_RADSL", Const, 0, ""},
+		{"IFT_REACHDSL", Const, 0, ""},
+		{"IFT_RFC1483", Const, 0, ""},
+		{"IFT_RS232", Const, 0, ""},
+		{"IFT_RSRB", Const, 0, ""},
+		{"IFT_SDLC", Const, 0, ""},
+		{"IFT_SDSL", Const, 0, ""},
+		{"IFT_SHDSL", Const, 0, ""},
+		{"IFT_SIP", Const, 0, ""},
+		{"IFT_SIPSIG", Const, 1, ""},
+		{"IFT_SIPTG", Const, 1, ""},
+		{"IFT_SLIP", Const, 0, ""},
+		{"IFT_SMDSDXI", Const, 0, ""},
+		{"IFT_SMDSICIP", Const, 0, ""},
+		{"IFT_SONET", Const, 0, ""},
+		{"IFT_SONETOVERHEADCHANNEL", Const, 0, ""},
+		{"IFT_SONETPATH", Const, 0, ""},
+		{"IFT_SONETVT", Const, 0, ""},
+		{"IFT_SRP", Const, 0, ""},
+		{"IFT_SS7SIGLINK", Const, 0, ""},
+		{"IFT_STACKTOSTACK", Const, 0, ""},
+		{"IFT_STARLAN", Const, 0, ""},
+		{"IFT_STF", Const, 0, ""},
+		{"IFT_T1", Const, 0, ""},
+		{"IFT_TDLC", Const, 0, ""},
+		{"IFT_TELINK", Const, 1, ""},
+		{"IFT_TERMPAD", Const, 0, ""},
+		{"IFT_TR008", Const, 0, ""},
+		{"IFT_TRANSPHDLC", Const, 0, ""},
+		{"IFT_TUNNEL", Const, 0, ""},
+		{"IFT_ULTRA", Const, 0, ""},
+		{"IFT_USB", Const, 0, ""},
+		{"IFT_V11", Const, 0, ""},
+		{"IFT_V35", Const, 0, ""},
+		{"IFT_V36", Const, 0, ""},
+		{"IFT_V37", Const, 0, ""},
+		{"IFT_VDSL", Const, 0, ""},
+		{"IFT_VIRTUALIPADDRESS", Const, 0, ""},
+		{"IFT_VIRTUALTG", Const, 1, ""},
+		{"IFT_VOICEDID", Const, 1, ""},
+		{"IFT_VOICEEM", Const, 0, ""},
+		{"IFT_VOICEEMFGD", Const, 1, ""},
+		{"IFT_VOICEENCAP", Const, 0, ""},
+		{"IFT_VOICEFGDEANA", Const, 1, ""},
+		{"IFT_VOICEFXO", Const, 0, ""},
+		{"IFT_VOICEFXS", Const, 0, ""},
+		{"IFT_VOICEOVERATM", Const, 0, ""},
+		{"IFT_VOICEOVERCABLE", Const, 1, ""},
+		{"IFT_VOICEOVERFRAMERELAY", Const, 0, ""},
+		{"IFT_VOICEOVERIP", Const, 0, ""},
+		{"IFT_X213", Const, 0, ""},
+		{"IFT_X25", Const, 0, ""},
+		{"IFT_X25DDN", Const, 0, ""},
+		{"IFT_X25HUNTGROUP", Const, 0, ""},
+		{"IFT_X25MLP", Const, 0, ""},
+		{"IFT_X25PLE", Const, 0, ""},
+		{"IFT_XETHER", Const, 0, ""},
+		{"IGNBRK", Const, 0, ""},
+		{"IGNCR", Const, 0, ""},
+		{"IGNORE", Const, 0, ""},
+		{"IGNPAR", Const, 0, ""},
+		{"IMAXBEL", Const, 0, ""},
+		{"INFINITE", Const, 0, ""},
+		{"INLCR", Const, 0, ""},
+		{"INPCK", Const, 0, ""},
+		{"INVALID_FILE_ATTRIBUTES", Const, 0, ""},
+		{"IN_ACCESS", Const, 0, ""},
+		{"IN_ALL_EVENTS", Const, 0, ""},
+		{"IN_ATTRIB", Const, 0, ""},
+		{"IN_CLASSA_HOST", Const, 0, ""},
+		{"IN_CLASSA_MAX", Const, 0, ""},
+		{"IN_CLASSA_NET", Const, 0, ""},
+		{"IN_CLASSA_NSHIFT", Const, 0, ""},
+		{"IN_CLASSB_HOST", Const, 0, ""},
+		{"IN_CLASSB_MAX", Const, 0, ""},
+		{"IN_CLASSB_NET", Const, 0, ""},
+		{"IN_CLASSB_NSHIFT", Const, 0, ""},
+		{"IN_CLASSC_HOST", Const, 0, ""},
+		{"IN_CLASSC_NET", Const, 0, ""},
+		{"IN_CLASSC_NSHIFT", Const, 0, ""},
+		{"IN_CLASSD_HOST", Const, 0, ""},
+		{"IN_CLASSD_NET", Const, 0, ""},
+		{"IN_CLASSD_NSHIFT", Const, 0, ""},
+		{"IN_CLOEXEC", Const, 0, ""},
+		{"IN_CLOSE", Const, 0, ""},
+		{"IN_CLOSE_NOWRITE", Const, 0, ""},
+		{"IN_CLOSE_WRITE", Const, 0, ""},
+		{"IN_CREATE", Const, 0, ""},
+		{"IN_DELETE", Const, 0, ""},
+		{"IN_DELETE_SELF", Const, 0, ""},
+		{"IN_DONT_FOLLOW", Const, 0, ""},
+		{"IN_EXCL_UNLINK", Const, 0, ""},
+		{"IN_IGNORED", Const, 0, ""},
+		{"IN_ISDIR", Const, 0, ""},
+		{"IN_LINKLOCALNETNUM", Const, 0, ""},
+		{"IN_LOOPBACKNET", Const, 0, ""},
+		{"IN_MASK_ADD", Const, 0, ""},
+		{"IN_MODIFY", Const, 0, ""},
+		{"IN_MOVE", Const, 0, ""},
+		{"IN_MOVED_FROM", Const, 0, ""},
+		{"IN_MOVED_TO", Const, 0, ""},
+		{"IN_MOVE_SELF", Const, 0, ""},
+		{"IN_NONBLOCK", Const, 0, ""},
+		{"IN_ONESHOT", Const, 0, ""},
+		{"IN_ONLYDIR", Const, 0, ""},
+		{"IN_OPEN", Const, 0, ""},
+		{"IN_Q_OVERFLOW", Const, 0, ""},
+		{"IN_RFC3021_HOST", Const, 1, ""},
+		{"IN_RFC3021_MASK", Const, 1, ""},
+		{"IN_RFC3021_NET", Const, 1, ""},
+		{"IN_RFC3021_NSHIFT", Const, 1, ""},
+		{"IN_UNMOUNT", Const, 0, ""},
+		{"IOC_IN", Const, 1, ""},
+		{"IOC_INOUT", Const, 1, ""},
+		{"IOC_OUT", Const, 1, ""},
+		{"IOC_VENDOR", Const, 3, ""},
+		{"IOC_WS2", Const, 1, ""},
+		{"IO_REPARSE_TAG_SYMLINK", Const, 4, ""},
+		{"IPMreq", Type, 0, ""},
+		{"IPMreq.Interface", Field, 0, ""},
+		{"IPMreq.Multiaddr", Field, 0, ""},
+		{"IPMreqn", Type, 0, ""},
+		{"IPMreqn.Address", Field, 0, ""},
+		{"IPMreqn.Ifindex", Field, 0, ""},
+		{"IPMreqn.Multiaddr", Field, 0, ""},
+		{"IPPROTO_3PC", Const, 0, ""},
+		{"IPPROTO_ADFS", Const, 0, ""},
+		{"IPPROTO_AH", Const, 0, ""},
+		{"IPPROTO_AHIP", Const, 0, ""},
+		{"IPPROTO_APES", Const, 0, ""},
+		{"IPPROTO_ARGUS", Const, 0, ""},
+		{"IPPROTO_AX25", Const, 0, ""},
+		{"IPPROTO_BHA", Const, 0, ""},
+		{"IPPROTO_BLT", Const, 0, ""},
+		{"IPPROTO_BRSATMON", Const, 0, ""},
+		{"IPPROTO_CARP", Const, 0, ""},
+		{"IPPROTO_CFTP", Const, 0, ""},
+		{"IPPROTO_CHAOS", Const, 0, ""},
+		{"IPPROTO_CMTP", Const, 0, ""},
+		{"IPPROTO_COMP", Const, 0, ""},
+		{"IPPROTO_CPHB", Const, 0, ""},
+		{"IPPROTO_CPNX", Const, 0, ""},
+		{"IPPROTO_DCCP", Const, 0, ""},
+		{"IPPROTO_DDP", Const, 0, ""},
+		{"IPPROTO_DGP", Const, 0, ""},
+		{"IPPROTO_DIVERT", Const, 0, ""},
+		{"IPPROTO_DIVERT_INIT", Const, 3, ""},
+		{"IPPROTO_DIVERT_RESP", Const, 3, ""},
+		{"IPPROTO_DONE", Const, 0, ""},
+		{"IPPROTO_DSTOPTS", Const, 0, ""},
+		{"IPPROTO_EGP", Const, 0, ""},
+		{"IPPROTO_EMCON", Const, 0, ""},
+		{"IPPROTO_ENCAP", Const, 0, ""},
+		{"IPPROTO_EON", Const, 0, ""},
+		{"IPPROTO_ESP", Const, 0, ""},
+		{"IPPROTO_ETHERIP", Const, 0, ""},
+		{"IPPROTO_FRAGMENT", Const, 0, ""},
+		{"IPPROTO_GGP", Const, 0, ""},
+		{"IPPROTO_GMTP", Const, 0, ""},
+		{"IPPROTO_GRE", Const, 0, ""},
+		{"IPPROTO_HELLO", Const, 0, ""},
+		{"IPPROTO_HMP", Const, 0, ""},
+		{"IPPROTO_HOPOPTS", Const, 0, ""},
+		{"IPPROTO_ICMP", Const, 0, ""},
+		{"IPPROTO_ICMPV6", Const, 0, ""},
+		{"IPPROTO_IDP", Const, 0, ""},
+		{"IPPROTO_IDPR", Const, 0, ""},
+		{"IPPROTO_IDRP", Const, 0, ""},
+		{"IPPROTO_IGMP", Const, 0, ""},
+		{"IPPROTO_IGP", Const, 0, ""},
+		{"IPPROTO_IGRP", Const, 0, ""},
+		{"IPPROTO_IL", Const, 0, ""},
+		{"IPPROTO_INLSP", Const, 0, ""},
+		{"IPPROTO_INP", Const, 0, ""},
+		{"IPPROTO_IP", Const, 0, ""},
+		{"IPPROTO_IPCOMP", Const, 0, ""},
+		{"IPPROTO_IPCV", Const, 0, ""},
+		{"IPPROTO_IPEIP", Const, 0, ""},
+		{"IPPROTO_IPIP", Const, 0, ""},
+		{"IPPROTO_IPPC", Const, 0, ""},
+		{"IPPROTO_IPV4", Const, 0, ""},
+		{"IPPROTO_IPV6", Const, 0, ""},
+		{"IPPROTO_IPV6_ICMP", Const, 1, ""},
+		{"IPPROTO_IRTP", Const, 0, ""},
+		{"IPPROTO_KRYPTOLAN", Const, 0, ""},
+		{"IPPROTO_LARP", Const, 0, ""},
+		{"IPPROTO_LEAF1", Const, 0, ""},
+		{"IPPROTO_LEAF2", Const, 0, ""},
+		{"IPPROTO_MAX", Const, 0, ""},
+		{"IPPROTO_MAXID", Const, 0, ""},
+		{"IPPROTO_MEAS", Const, 0, ""},
+		{"IPPROTO_MH", Const, 1, ""},
+		{"IPPROTO_MHRP", Const, 0, ""},
+		{"IPPROTO_MICP", Const, 0, ""},
+		{"IPPROTO_MOBILE", Const, 0, ""},
+		{"IPPROTO_MPLS", Const, 1, ""},
+		{"IPPROTO_MTP", Const, 0, ""},
+		{"IPPROTO_MUX", Const, 0, ""},
+		{"IPPROTO_ND", Const, 0, ""},
+		{"IPPROTO_NHRP", Const, 0, ""},
+		{"IPPROTO_NONE", Const, 0, ""},
+		{"IPPROTO_NSP", Const, 0, ""},
+		{"IPPROTO_NVPII", Const, 0, ""},
+		{"IPPROTO_OLD_DIVERT", Const, 0, ""},
+		{"IPPROTO_OSPFIGP", Const, 0, ""},
+		{"IPPROTO_PFSYNC", Const, 0, ""},
+		{"IPPROTO_PGM", Const, 0, ""},
+		{"IPPROTO_PIGP", Const, 0, ""},
+		{"IPPROTO_PIM", Const, 0, ""},
+		{"IPPROTO_PRM", Const, 0, ""},
+		{"IPPROTO_PUP", Const, 0, ""},
+		{"IPPROTO_PVP", Const, 0, ""},
+		{"IPPROTO_RAW", Const, 0, ""},
+		{"IPPROTO_RCCMON", Const, 0, ""},
+		{"IPPROTO_RDP", Const, 0, ""},
+		{"IPPROTO_ROUTING", Const, 0, ""},
+		{"IPPROTO_RSVP", Const, 0, ""},
+		{"IPPROTO_RVD", Const, 0, ""},
+		{"IPPROTO_SATEXPAK", Const, 0, ""},
+		{"IPPROTO_SATMON", Const, 0, ""},
+		{"IPPROTO_SCCSP", Const, 0, ""},
+		{"IPPROTO_SCTP", Const, 0, ""},
+		{"IPPROTO_SDRP", Const, 0, ""},
+		{"IPPROTO_SEND", Const, 1, ""},
+		{"IPPROTO_SEP", Const, 0, ""},
+		{"IPPROTO_SKIP", Const, 0, ""},
+		{"IPPROTO_SPACER", Const, 0, ""},
+		{"IPPROTO_SRPC", Const, 0, ""},
+		{"IPPROTO_ST", Const, 0, ""},
+		{"IPPROTO_SVMTP", Const, 0, ""},
+		{"IPPROTO_SWIPE", Const, 0, ""},
+		{"IPPROTO_TCF", Const, 0, ""},
+		{"IPPROTO_TCP", Const, 0, ""},
+		{"IPPROTO_TLSP", Const, 0, ""},
+		{"IPPROTO_TP", Const, 0, ""},
+		{"IPPROTO_TPXX", Const, 0, ""},
+		{"IPPROTO_TRUNK1", Const, 0, ""},
+		{"IPPROTO_TRUNK2", Const, 0, ""},
+		{"IPPROTO_TTP", Const, 0, ""},
+		{"IPPROTO_UDP", Const, 0, ""},
+		{"IPPROTO_UDPLITE", Const, 0, ""},
+		{"IPPROTO_VINES", Const, 0, ""},
+		{"IPPROTO_VISA", Const, 0, ""},
+		{"IPPROTO_VMTP", Const, 0, ""},
+		{"IPPROTO_VRRP", Const, 1, ""},
+		{"IPPROTO_WBEXPAK", Const, 0, ""},
+		{"IPPROTO_WBMON", Const, 0, ""},
+		{"IPPROTO_WSN", Const, 0, ""},
+		{"IPPROTO_XNET", Const, 0, ""},
+		{"IPPROTO_XTP", Const, 0, ""},
+		{"IPV6_2292DSTOPTS", Const, 0, ""},
+		{"IPV6_2292HOPLIMIT", Const, 0, ""},
+		{"IPV6_2292HOPOPTS", Const, 0, ""},
+		{"IPV6_2292NEXTHOP", Const, 0, ""},
+		{"IPV6_2292PKTINFO", Const, 0, ""},
+		{"IPV6_2292PKTOPTIONS", Const, 0, ""},
+		{"IPV6_2292RTHDR", Const, 0, ""},
+		{"IPV6_ADDRFORM", Const, 0, ""},
+		{"IPV6_ADD_MEMBERSHIP", Const, 0, ""},
+		{"IPV6_AUTHHDR", Const, 0, ""},
+		{"IPV6_AUTH_LEVEL", Const, 1, ""},
+		{"IPV6_AUTOFLOWLABEL", Const, 0, ""},
+		{"IPV6_BINDANY", Const, 0, ""},
+		{"IPV6_BINDV6ONLY", Const, 0, ""},
+		{"IPV6_BOUND_IF", Const, 0, ""},
+		{"IPV6_CHECKSUM", Const, 0, ""},
+		{"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0, ""},
+		{"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
+		{"IPV6_DEFHLIM", Const, 0, ""},
+		{"IPV6_DONTFRAG", Const, 0, ""},
+		{"IPV6_DROP_MEMBERSHIP", Const, 0, ""},
+		{"IPV6_DSTOPTS", Const, 0, ""},
+		{"IPV6_ESP_NETWORK_LEVEL", Const, 1, ""},
+		{"IPV6_ESP_TRANS_LEVEL", Const, 1, ""},
+		{"IPV6_FAITH", Const, 0, ""},
+		{"IPV6_FLOWINFO_MASK", Const, 0, ""},
+		{"IPV6_FLOWLABEL_MASK", Const, 0, ""},
+		{"IPV6_FRAGTTL", Const, 0, ""},
+		{"IPV6_FW_ADD", Const, 0, ""},
+		{"IPV6_FW_DEL", Const, 0, ""},
+		{"IPV6_FW_FLUSH", Const, 0, ""},
+		{"IPV6_FW_GET", Const, 0, ""},
+		{"IPV6_FW_ZERO", Const, 0, ""},
+		{"IPV6_HLIMDEC", Const, 0, ""},
+		{"IPV6_HOPLIMIT", Const, 0, ""},
+		{"IPV6_HOPOPTS", Const, 0, ""},
+		{"IPV6_IPCOMP_LEVEL", Const, 1, ""},
+		{"IPV6_IPSEC_POLICY", Const, 0, ""},
+		{"IPV6_JOIN_ANYCAST", Const, 0, ""},
+		{"IPV6_JOIN_GROUP", Const, 0, ""},
+		{"IPV6_LEAVE_ANYCAST", Const, 0, ""},
+		{"IPV6_LEAVE_GROUP", Const, 0, ""},
+		{"IPV6_MAXHLIM", Const, 0, ""},
+		{"IPV6_MAXOPTHDR", Const, 0, ""},
+		{"IPV6_MAXPACKET", Const, 0, ""},
+		{"IPV6_MAX_GROUP_SRC_FILTER", Const, 0, ""},
+		{"IPV6_MAX_MEMBERSHIPS", Const, 0, ""},
+		{"IPV6_MAX_SOCK_SRC_FILTER", Const, 0, ""},
+		{"IPV6_MIN_MEMBERSHIPS", Const, 0, ""},
+		{"IPV6_MMTU", Const, 0, ""},
+		{"IPV6_MSFILTER", Const, 0, ""},
+		{"IPV6_MTU", Const, 0, ""},
+		{"IPV6_MTU_DISCOVER", Const, 0, ""},
+		{"IPV6_MULTICAST_HOPS", Const, 0, ""},
+		{"IPV6_MULTICAST_IF", Const, 0, ""},
+		{"IPV6_MULTICAST_LOOP", Const, 0, ""},
+		{"IPV6_NEXTHOP", Const, 0, ""},
+		{"IPV6_OPTIONS", Const, 1, ""},
+		{"IPV6_PATHMTU", Const, 0, ""},
+		{"IPV6_PIPEX", Const, 1, ""},
+		{"IPV6_PKTINFO", Const, 0, ""},
+		{"IPV6_PMTUDISC_DO", Const, 0, ""},
+		{"IPV6_PMTUDISC_DONT", Const, 0, ""},
+		{"IPV6_PMTUDISC_PROBE", Const, 0, ""},
+		{"IPV6_PMTUDISC_WANT", Const, 0, ""},
+		{"IPV6_PORTRANGE", Const, 0, ""},
+		{"IPV6_PORTRANGE_DEFAULT", Const, 0, ""},
+		{"IPV6_PORTRANGE_HIGH", Const, 0, ""},
+		{"IPV6_PORTRANGE_LOW", Const, 0, ""},
+		{"IPV6_PREFER_TEMPADDR", Const, 0, ""},
+		{"IPV6_RECVDSTOPTS", Const, 0, ""},
+		{"IPV6_RECVDSTPORT", Const, 3, ""},
+		{"IPV6_RECVERR", Const, 0, ""},
+		{"IPV6_RECVHOPLIMIT", Const, 0, ""},
+		{"IPV6_RECVHOPOPTS", Const, 0, ""},
+		{"IPV6_RECVPATHMTU", Const, 0, ""},
+		{"IPV6_RECVPKTINFO", Const, 0, ""},
+		{"IPV6_RECVRTHDR", Const, 0, ""},
+		{"IPV6_RECVTCLASS", Const, 0, ""},
+		{"IPV6_ROUTER_ALERT", Const, 0, ""},
+		{"IPV6_RTABLE", Const, 1, ""},
+		{"IPV6_RTHDR", Const, 0, ""},
+		{"IPV6_RTHDRDSTOPTS", Const, 0, ""},
+		{"IPV6_RTHDR_LOOSE", Const, 0, ""},
+		{"IPV6_RTHDR_STRICT", Const, 0, ""},
+		{"IPV6_RTHDR_TYPE_0", Const, 0, ""},
+		{"IPV6_RXDSTOPTS", Const, 0, ""},
+		{"IPV6_RXHOPOPTS", Const, 0, ""},
+		{"IPV6_SOCKOPT_RESERVED1", Const, 0, ""},
+		{"IPV6_TCLASS", Const, 0, ""},
+		{"IPV6_UNICAST_HOPS", Const, 0, ""},
+		{"IPV6_USE_MIN_MTU", Const, 0, ""},
+		{"IPV6_V6ONLY", Const, 0, ""},
+		{"IPV6_VERSION", Const, 0, ""},
+		{"IPV6_VERSION_MASK", Const, 0, ""},
+		{"IPV6_XFRM_POLICY", Const, 0, ""},
+		{"IP_ADD_MEMBERSHIP", Const, 0, ""},
+		{"IP_ADD_SOURCE_MEMBERSHIP", Const, 0, ""},
+		{"IP_AUTH_LEVEL", Const, 1, ""},
+		{"IP_BINDANY", Const, 0, ""},
+		{"IP_BLOCK_SOURCE", Const, 0, ""},
+		{"IP_BOUND_IF", Const, 0, ""},
+		{"IP_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
+		{"IP_DEFAULT_MULTICAST_TTL", Const, 0, ""},
+		{"IP_DF", Const, 0, ""},
+		{"IP_DIVERTFL", Const, 3, ""},
+		{"IP_DONTFRAG", Const, 0, ""},
+		{"IP_DROP_MEMBERSHIP", Const, 0, ""},
+		{"IP_DROP_SOURCE_MEMBERSHIP", Const, 0, ""},
+		{"IP_DUMMYNET3", Const, 0, ""},
+		{"IP_DUMMYNET_CONFIGURE", Const, 0, ""},
+		{"IP_DUMMYNET_DEL", Const, 0, ""},
+		{"IP_DUMMYNET_FLUSH", Const, 0, ""},
+		{"IP_DUMMYNET_GET", Const, 0, ""},
+		{"IP_EF", Const, 1, ""},
+		{"IP_ERRORMTU", Const, 1, ""},
+		{"IP_ESP_NETWORK_LEVEL", Const, 1, ""},
+		{"IP_ESP_TRANS_LEVEL", Const, 1, ""},
+		{"IP_FAITH", Const, 0, ""},
+		{"IP_FREEBIND", Const, 0, ""},
+		{"IP_FW3", Const, 0, ""},
+		{"IP_FW_ADD", Const, 0, ""},
+		{"IP_FW_DEL", Const, 0, ""},
+		{"IP_FW_FLUSH", Const, 0, ""},
+		{"IP_FW_GET", Const, 0, ""},
+		{"IP_FW_NAT_CFG", Const, 0, ""},
+		{"IP_FW_NAT_DEL", Const, 0, ""},
+		{"IP_FW_NAT_GET_CONFIG", Const, 0, ""},
+		{"IP_FW_NAT_GET_LOG", Const, 0, ""},
+		{"IP_FW_RESETLOG", Const, 0, ""},
+		{"IP_FW_TABLE_ADD", Const, 0, ""},
+		{"IP_FW_TABLE_DEL", Const, 0, ""},
+		{"IP_FW_TABLE_FLUSH", Const, 0, ""},
+		{"IP_FW_TABLE_GETSIZE", Const, 0, ""},
+		{"IP_FW_TABLE_LIST", Const, 0, ""},
+		{"IP_FW_ZERO", Const, 0, ""},
+		{"IP_HDRINCL", Const, 0, ""},
+		{"IP_IPCOMP_LEVEL", Const, 1, ""},
+		{"IP_IPSECFLOWINFO", Const, 1, ""},
+		{"IP_IPSEC_LOCAL_AUTH", Const, 1, ""},
+		{"IP_IPSEC_LOCAL_CRED", Const, 1, ""},
+		{"IP_IPSEC_LOCAL_ID", Const, 1, ""},
+		{"IP_IPSEC_POLICY", Const, 0, ""},
+		{"IP_IPSEC_REMOTE_AUTH", Const, 1, ""},
+		{"IP_IPSEC_REMOTE_CRED", Const, 1, ""},
+		{"IP_IPSEC_REMOTE_ID", Const, 1, ""},
+		{"IP_MAXPACKET", Const, 0, ""},
+		{"IP_MAX_GROUP_SRC_FILTER", Const, 0, ""},
+		{"IP_MAX_MEMBERSHIPS", Const, 0, ""},
+		{"IP_MAX_SOCK_MUTE_FILTER", Const, 0, ""},
+		{"IP_MAX_SOCK_SRC_FILTER", Const, 0, ""},
+		{"IP_MAX_SOURCE_FILTER", Const, 0, ""},
+		{"IP_MF", Const, 0, ""},
+		{"IP_MINFRAGSIZE", Const, 1, ""},
+		{"IP_MINTTL", Const, 0, ""},
+		{"IP_MIN_MEMBERSHIPS", Const, 0, ""},
+		{"IP_MSFILTER", Const, 0, ""},
+		{"IP_MSS", Const, 0, ""},
+		{"IP_MTU", Const, 0, ""},
+		{"IP_MTU_DISCOVER", Const, 0, ""},
+		{"IP_MULTICAST_IF", Const, 0, ""},
+		{"IP_MULTICAST_IFINDEX", Const, 0, ""},
+		{"IP_MULTICAST_LOOP", Const, 0, ""},
+		{"IP_MULTICAST_TTL", Const, 0, ""},
+		{"IP_MULTICAST_VIF", Const, 0, ""},
+		{"IP_NAT__XXX", Const, 0, ""},
+		{"IP_OFFMASK", Const, 0, ""},
+		{"IP_OLD_FW_ADD", Const, 0, ""},
+		{"IP_OLD_FW_DEL", Const, 0, ""},
+		{"IP_OLD_FW_FLUSH", Const, 0, ""},
+		{"IP_OLD_FW_GET", Const, 0, ""},
+		{"IP_OLD_FW_RESETLOG", Const, 0, ""},
+		{"IP_OLD_FW_ZERO", Const, 0, ""},
+		{"IP_ONESBCAST", Const, 0, ""},
+		{"IP_OPTIONS", Const, 0, ""},
+		{"IP_ORIGDSTADDR", Const, 0, ""},
+		{"IP_PASSSEC", Const, 0, ""},
+		{"IP_PIPEX", Const, 1, ""},
+		{"IP_PKTINFO", Const, 0, ""},
+		{"IP_PKTOPTIONS", Const, 0, ""},
+		{"IP_PMTUDISC", Const, 0, ""},
+		{"IP_PMTUDISC_DO", Const, 0, ""},
+		{"IP_PMTUDISC_DONT", Const, 0, ""},
+		{"IP_PMTUDISC_PROBE", Const, 0, ""},
+		{"IP_PMTUDISC_WANT", Const, 0, ""},
+		{"IP_PORTRANGE", Const, 0, ""},
+		{"IP_PORTRANGE_DEFAULT", Const, 0, ""},
+		{"IP_PORTRANGE_HIGH", Const, 0, ""},
+		{"IP_PORTRANGE_LOW", Const, 0, ""},
+		{"IP_RECVDSTADDR", Const, 0, ""},
+		{"IP_RECVDSTPORT", Const, 1, ""},
+		{"IP_RECVERR", Const, 0, ""},
+		{"IP_RECVIF", Const, 0, ""},
+		{"IP_RECVOPTS", Const, 0, ""},
+		{"IP_RECVORIGDSTADDR", Const, 0, ""},
+		{"IP_RECVPKTINFO", Const, 0, ""},
+		{"IP_RECVRETOPTS", Const, 0, ""},
+		{"IP_RECVRTABLE", Const, 1, ""},
+		{"IP_RECVTOS", Const, 0, ""},
+		{"IP_RECVTTL", Const, 0, ""},
+		{"IP_RETOPTS", Const, 0, ""},
+		{"IP_RF", Const, 0, ""},
+		{"IP_ROUTER_ALERT", Const, 0, ""},
+		{"IP_RSVP_OFF", Const, 0, ""},
+		{"IP_RSVP_ON", Const, 0, ""},
+		{"IP_RSVP_VIF_OFF", Const, 0, ""},
+		{"IP_RSVP_VIF_ON", Const, 0, ""},
+		{"IP_RTABLE", Const, 1, ""},
+		{"IP_SENDSRCADDR", Const, 0, ""},
+		{"IP_STRIPHDR", Const, 0, ""},
+		{"IP_TOS", Const, 0, ""},
+		{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0, ""},
+		{"IP_TRANSPARENT", Const, 0, ""},
+		{"IP_TTL", Const, 0, ""},
+		{"IP_UNBLOCK_SOURCE", Const, 0, ""},
+		{"IP_XFRM_POLICY", Const, 0, ""},
+		{"IPv6MTUInfo", Type, 2, ""},
+		{"IPv6MTUInfo.Addr", Field, 2, ""},
+		{"IPv6MTUInfo.Mtu", Field, 2, ""},
+		{"IPv6Mreq", Type, 0, ""},
+		{"IPv6Mreq.Interface", Field, 0, ""},
+		{"IPv6Mreq.Multiaddr", Field, 0, ""},
+		{"ISIG", Const, 0, ""},
+		{"ISTRIP", Const, 0, ""},
+		{"IUCLC", Const, 0, ""},
+		{"IUTF8", Const, 0, ""},
+		{"IXANY", Const, 0, ""},
+		{"IXOFF", Const, 0, ""},
+		{"IXON", Const, 0, ""},
+		{"IfAddrmsg", Type, 0, ""},
+		{"IfAddrmsg.Family", Field, 0, ""},
+		{"IfAddrmsg.Flags", Field, 0, ""},
+		{"IfAddrmsg.Index", Field, 0, ""},
+		{"IfAddrmsg.Prefixlen", Field, 0, ""},
+		{"IfAddrmsg.Scope", Field, 0, ""},
+		{"IfAnnounceMsghdr", Type, 1, ""},
+		{"IfAnnounceMsghdr.Hdrlen", Field, 2, ""},
+		{"IfAnnounceMsghdr.Index", Field, 1, ""},
+		{"IfAnnounceMsghdr.Msglen", Field, 1, ""},
+		{"IfAnnounceMsghdr.Name", Field, 1, ""},
+		{"IfAnnounceMsghdr.Type", Field, 1, ""},
+		{"IfAnnounceMsghdr.Version", Field, 1, ""},
+		{"IfAnnounceMsghdr.What", Field, 1, ""},
+		{"IfData", Type, 0, ""},
+		{"IfData.Addrlen", Field, 0, ""},
+		{"IfData.Baudrate", Field, 0, ""},
+		{"IfData.Capabilities", Field, 2, ""},
+		{"IfData.Collisions", Field, 0, ""},
+		{"IfData.Datalen", Field, 0, ""},
+		{"IfData.Epoch", Field, 0, ""},
+		{"IfData.Hdrlen", Field, 0, ""},
+		{"IfData.Hwassist", Field, 0, ""},
+		{"IfData.Ibytes", Field, 0, ""},
+		{"IfData.Ierrors", Field, 0, ""},
+		{"IfData.Imcasts", Field, 0, ""},
+		{"IfData.Ipackets", Field, 0, ""},
+		{"IfData.Iqdrops", Field, 0, ""},
+		{"IfData.Lastchange", Field, 0, ""},
+		{"IfData.Link_state", Field, 0, ""},
+		{"IfData.Mclpool", Field, 2, ""},
+		{"IfData.Metric", Field, 0, ""},
+		{"IfData.Mtu", Field, 0, ""},
+		{"IfData.Noproto", Field, 0, ""},
+		{"IfData.Obytes", Field, 0, ""},
+		{"IfData.Oerrors", Field, 0, ""},
+		{"IfData.Omcasts", Field, 0, ""},
+		{"IfData.Opackets", Field, 0, ""},
+		{"IfData.Pad", Field, 2, ""},
+		{"IfData.Pad_cgo_0", Field, 2, ""},
+		{"IfData.Pad_cgo_1", Field, 2, ""},
+		{"IfData.Physical", Field, 0, ""},
+		{"IfData.Recvquota", Field, 0, ""},
+		{"IfData.Recvtiming", Field, 0, ""},
+		{"IfData.Reserved1", Field, 0, ""},
+		{"IfData.Reserved2", Field, 0, ""},
+		{"IfData.Spare_char1", Field, 0, ""},
+		{"IfData.Spare_char2", Field, 0, ""},
+		{"IfData.Type", Field, 0, ""},
+		{"IfData.Typelen", Field, 0, ""},
+		{"IfData.Unused1", Field, 0, ""},
+		{"IfData.Unused2", Field, 0, ""},
+		{"IfData.Xmitquota", Field, 0, ""},
+		{"IfData.Xmittiming", Field, 0, ""},
+		{"IfInfomsg", Type, 0, ""},
+		{"IfInfomsg.Change", Field, 0, ""},
+		{"IfInfomsg.Family", Field, 0, ""},
+		{"IfInfomsg.Flags", Field, 0, ""},
+		{"IfInfomsg.Index", Field, 0, ""},
+		{"IfInfomsg.Type", Field, 0, ""},
+		{"IfInfomsg.X__ifi_pad", Field, 0, ""},
+		{"IfMsghdr", Type, 0, ""},
+		{"IfMsghdr.Addrs", Field, 0, ""},
+		{"IfMsghdr.Data", Field, 0, ""},
+		{"IfMsghdr.Flags", Field, 0, ""},
+		{"IfMsghdr.Hdrlen", Field, 2, ""},
+		{"IfMsghdr.Index", Field, 0, ""},
+		{"IfMsghdr.Msglen", Field, 0, ""},
+		{"IfMsghdr.Pad1", Field, 2, ""},
+		{"IfMsghdr.Pad2", Field, 2, ""},
+		{"IfMsghdr.Pad_cgo_0", Field, 0, ""},
+		{"IfMsghdr.Pad_cgo_1", Field, 2, ""},
+		{"IfMsghdr.Tableid", Field, 2, ""},
+		{"IfMsghdr.Type", Field, 0, ""},
+		{"IfMsghdr.Version", Field, 0, ""},
+		{"IfMsghdr.Xflags", Field, 2, ""},
+		{"IfaMsghdr", Type, 0, ""},
+		{"IfaMsghdr.Addrs", Field, 0, ""},
+		{"IfaMsghdr.Flags", Field, 0, ""},
+		{"IfaMsghdr.Hdrlen", Field, 2, ""},
+		{"IfaMsghdr.Index", Field, 0, ""},
+		{"IfaMsghdr.Metric", Field, 0, ""},
+		{"IfaMsghdr.Msglen", Field, 0, ""},
+		{"IfaMsghdr.Pad1", Field, 2, ""},
+		{"IfaMsghdr.Pad2", Field, 2, ""},
+		{"IfaMsghdr.Pad_cgo_0", Field, 0, ""},
+		{"IfaMsghdr.Tableid", Field, 2, ""},
+		{"IfaMsghdr.Type", Field, 0, ""},
+		{"IfaMsghdr.Version", Field, 0, ""},
+		{"IfmaMsghdr", Type, 0, ""},
+		{"IfmaMsghdr.Addrs", Field, 0, ""},
+		{"IfmaMsghdr.Flags", Field, 0, ""},
+		{"IfmaMsghdr.Index", Field, 0, ""},
+		{"IfmaMsghdr.Msglen", Field, 0, ""},
+		{"IfmaMsghdr.Pad_cgo_0", Field, 0, ""},
+		{"IfmaMsghdr.Type", Field, 0, ""},
+		{"IfmaMsghdr.Version", Field, 0, ""},
+		{"IfmaMsghdr2", Type, 0, ""},
+		{"IfmaMsghdr2.Addrs", Field, 0, ""},
+		{"IfmaMsghdr2.Flags", Field, 0, ""},
+		{"IfmaMsghdr2.Index", Field, 0, ""},
+		{"IfmaMsghdr2.Msglen", Field, 0, ""},
+		{"IfmaMsghdr2.Pad_cgo_0", Field, 0, ""},
+		{"IfmaMsghdr2.Refcount", Field, 0, ""},
+		{"IfmaMsghdr2.Type", Field, 0, ""},
+		{"IfmaMsghdr2.Version", Field, 0, ""},
+		{"ImplementsGetwd", Const, 0, ""},
+		{"Inet4Pktinfo", Type, 0, ""},
+		{"Inet4Pktinfo.Addr", Field, 0, ""},
+		{"Inet4Pktinfo.Ifindex", Field, 0, ""},
+		{"Inet4Pktinfo.Spec_dst", Field, 0, ""},
+		{"Inet6Pktinfo", Type, 0, ""},
+		{"Inet6Pktinfo.Addr", Field, 0, ""},
+		{"Inet6Pktinfo.Ifindex", Field, 0, ""},
+		{"InotifyAddWatch", Func, 0, "func(fd int, pathname string, mask uint32) (watchdesc int, err error)"},
+		{"InotifyEvent", Type, 0, ""},
+		{"InotifyEvent.Cookie", Field, 0, ""},
+		{"InotifyEvent.Len", Field, 0, ""},
+		{"InotifyEvent.Mask", Field, 0, ""},
+		{"InotifyEvent.Name", Field, 0, ""},
+		{"InotifyEvent.Wd", Field, 0, ""},
+		{"InotifyInit", Func, 0, "func() (fd int, err error)"},
+		{"InotifyInit1", Func, 0, "func(flags int) (fd int, err error)"},
+		{"InotifyRmWatch", Func, 0, "func(fd int, watchdesc uint32) (success int, err error)"},
+		{"InterfaceAddrMessage", Type, 0, ""},
+		{"InterfaceAddrMessage.Data", Field, 0, ""},
+		{"InterfaceAddrMessage.Header", Field, 0, ""},
+		{"InterfaceAnnounceMessage", Type, 1, ""},
+		{"InterfaceAnnounceMessage.Header", Field, 1, ""},
+		{"InterfaceInfo", Type, 0, ""},
+		{"InterfaceInfo.Address", Field, 0, ""},
+		{"InterfaceInfo.BroadcastAddress", Field, 0, ""},
+		{"InterfaceInfo.Flags", Field, 0, ""},
+		{"InterfaceInfo.Netmask", Field, 0, ""},
+		{"InterfaceMessage", Type, 0, ""},
+		{"InterfaceMessage.Data", Field, 0, ""},
+		{"InterfaceMessage.Header", Field, 0, ""},
+		{"InterfaceMulticastAddrMessage", Type, 0, ""},
+		{"InterfaceMulticastAddrMessage.Data", Field, 0, ""},
+		{"InterfaceMulticastAddrMessage.Header", Field, 0, ""},
+		{"InvalidHandle", Const, 0, ""},
+		{"Ioperm", Func, 0, "func(from int, num int, on int) (err error)"},
+		{"Iopl", Func, 0, "func(level int) (err error)"},
+		{"Iovec", Type, 0, ""},
+		{"Iovec.Base", Field, 0, ""},
+		{"Iovec.Len", Field, 0, ""},
+		{"IpAdapterInfo", Type, 0, ""},
+		{"IpAdapterInfo.AdapterName", Field, 0, ""},
+		{"IpAdapterInfo.Address", Field, 0, ""},
+		{"IpAdapterInfo.AddressLength", Field, 0, ""},
+		{"IpAdapterInfo.ComboIndex", Field, 0, ""},
+		{"IpAdapterInfo.CurrentIpAddress", Field, 0, ""},
+		{"IpAdapterInfo.Description", Field, 0, ""},
+		{"IpAdapterInfo.DhcpEnabled", Field, 0, ""},
+		{"IpAdapterInfo.DhcpServer", Field, 0, ""},
+		{"IpAdapterInfo.GatewayList", Field, 0, ""},
+		{"IpAdapterInfo.HaveWins", Field, 0, ""},
+		{"IpAdapterInfo.Index", Field, 0, ""},
+		{"IpAdapterInfo.IpAddressList", Field, 0, ""},
+		{"IpAdapterInfo.LeaseExpires", Field, 0, ""},
+		{"IpAdapterInfo.LeaseObtained", Field, 0, ""},
+		{"IpAdapterInfo.Next", Field, 0, ""},
+		{"IpAdapterInfo.PrimaryWinsServer", Field, 0, ""},
+		{"IpAdapterInfo.SecondaryWinsServer", Field, 0, ""},
+		{"IpAdapterInfo.Type", Field, 0, ""},
+		{"IpAddrString", Type, 0, ""},
+		{"IpAddrString.Context", Field, 0, ""},
+		{"IpAddrString.IpAddress", Field, 0, ""},
+		{"IpAddrString.IpMask", Field, 0, ""},
+		{"IpAddrString.Next", Field, 0, ""},
+		{"IpAddressString", Type, 0, ""},
+		{"IpAddressString.String", Field, 0, ""},
+		{"IpMaskString", Type, 0, ""},
+		{"IpMaskString.String", Field, 2, ""},
+		{"Issetugid", Func, 0, ""},
+		{"KEY_ALL_ACCESS", Const, 0, ""},
+		{"KEY_CREATE_LINK", Const, 0, ""},
+		{"KEY_CREATE_SUB_KEY", Const, 0, ""},
+		{"KEY_ENUMERATE_SUB_KEYS", Const, 0, ""},
+		{"KEY_EXECUTE", Const, 0, ""},
+		{"KEY_NOTIFY", Const, 0, ""},
+		{"KEY_QUERY_VALUE", Const, 0, ""},
+		{"KEY_READ", Const, 0, ""},
+		{"KEY_SET_VALUE", Const, 0, ""},
+		{"KEY_WOW64_32KEY", Const, 0, ""},
+		{"KEY_WOW64_64KEY", Const, 0, ""},
+		{"KEY_WRITE", Const, 0, ""},
+		{"Kevent", Func, 0, ""},
+		{"Kevent_t", Type, 0, ""},
+		{"Kevent_t.Data", Field, 0, ""},
+		{"Kevent_t.Fflags", Field, 0, ""},
+		{"Kevent_t.Filter", Field, 0, ""},
+		{"Kevent_t.Flags", Field, 0, ""},
+		{"Kevent_t.Ident", Field, 0, ""},
+		{"Kevent_t.Pad_cgo_0", Field, 2, ""},
+		{"Kevent_t.Udata", Field, 0, ""},
+		{"Kill", Func, 0, "func(pid int, sig Signal) (err error)"},
+		{"Klogctl", Func, 0, "func(typ int, buf []byte) (n int, err error)"},
+		{"Kqueue", Func, 0, ""},
+		{"LANG_ENGLISH", Const, 0, ""},
+		{"LAYERED_PROTOCOL", Const, 2, ""},
+		{"LCNT_OVERLOAD_FLUSH", Const, 1, ""},
+		{"LINUX_REBOOT_CMD_CAD_OFF", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_CAD_ON", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_HALT", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_KEXEC", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_POWER_OFF", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_RESTART", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_RESTART2", Const, 0, ""},
+		{"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0, ""},
+		{"LINUX_REBOOT_MAGIC1", Const, 0, ""},
+		{"LINUX_REBOOT_MAGIC2", Const, 0, ""},
+		{"LOCK_EX", Const, 0, ""},
+		{"LOCK_NB", Const, 0, ""},
+		{"LOCK_SH", Const, 0, ""},
+		{"LOCK_UN", Const, 0, ""},
+		{"LazyDLL", Type, 0, ""},
+		{"LazyDLL.Name", Field, 0, ""},
+		{"LazyProc", Type, 0, ""},
+		{"LazyProc.Name", Field, 0, ""},
+		{"Lchown", Func, 0, "func(path string, uid int, gid int) (err error)"},
+		{"Linger", Type, 0, ""},
+		{"Linger.Linger", Field, 0, ""},
+		{"Linger.Onoff", Field, 0, ""},
+		{"Link", Func, 0, "func(oldpath string, newpath string) (err error)"},
+		{"Listen", Func, 0, "func(s int, n int) (err error)"},
+		{"Listxattr", Func, 1, "func(path string, dest []byte) (sz int, err error)"},
+		{"LoadCancelIoEx", Func, 1, ""},
+		{"LoadConnectEx", Func, 1, ""},
+		{"LoadCreateSymbolicLink", Func, 4, ""},
+		{"LoadDLL", Func, 0, ""},
+		{"LoadGetAddrInfo", Func, 1, ""},
+		{"LoadLibrary", Func, 0, ""},
+		{"LoadSetFileCompletionNotificationModes", Func, 2, ""},
+		{"LocalFree", Func, 0, ""},
+		{"Log2phys_t", Type, 0, ""},
+		{"Log2phys_t.Contigbytes", Field, 0, ""},
+		{"Log2phys_t.Devoffset", Field, 0, ""},
+		{"Log2phys_t.Flags", Field, 0, ""},
+		{"LookupAccountName", Func, 0, ""},
+		{"LookupAccountSid", Func, 0, ""},
+		{"LookupSID", Func, 0, ""},
+		{"LsfJump", Func, 0, "func(code int, k int, jt int, jf int) *SockFilter"},
+		{"LsfSocket", Func, 0, "func(ifindex int, proto int) (int, error)"},
+		{"LsfStmt", Func, 0, "func(code int, k int) *SockFilter"},
+		{"Lstat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
+		{"MADV_AUTOSYNC", Const, 1, ""},
+		{"MADV_CAN_REUSE", Const, 0, ""},
+		{"MADV_CORE", Const, 1, ""},
+		{"MADV_DOFORK", Const, 0, ""},
+		{"MADV_DONTFORK", Const, 0, ""},
+		{"MADV_DONTNEED", Const, 0, ""},
+		{"MADV_FREE", Const, 0, ""},
+		{"MADV_FREE_REUSABLE", Const, 0, ""},
+		{"MADV_FREE_REUSE", Const, 0, ""},
+		{"MADV_HUGEPAGE", Const, 0, ""},
+		{"MADV_HWPOISON", Const, 0, ""},
+		{"MADV_MERGEABLE", Const, 0, ""},
+		{"MADV_NOCORE", Const, 1, ""},
+		{"MADV_NOHUGEPAGE", Const, 0, ""},
+		{"MADV_NORMAL", Const, 0, ""},
+		{"MADV_NOSYNC", Const, 1, ""},
+		{"MADV_PROTECT", Const, 1, ""},
+		{"MADV_RANDOM", Const, 0, ""},
+		{"MADV_REMOVE", Const, 0, ""},
+		{"MADV_SEQUENTIAL", Const, 0, ""},
+		{"MADV_SPACEAVAIL", Const, 3, ""},
+		{"MADV_UNMERGEABLE", Const, 0, ""},
+		{"MADV_WILLNEED", Const, 0, ""},
+		{"MADV_ZERO_WIRED_PAGES", Const, 0, ""},
+		{"MAP_32BIT", Const, 0, ""},
+		{"MAP_ALIGNED_SUPER", Const, 3, ""},
+		{"MAP_ALIGNMENT_16MB", Const, 3, ""},
+		{"MAP_ALIGNMENT_1TB", Const, 3, ""},
+		{"MAP_ALIGNMENT_256TB", Const, 3, ""},
+		{"MAP_ALIGNMENT_4GB", Const, 3, ""},
+		{"MAP_ALIGNMENT_64KB", Const, 3, ""},
+		{"MAP_ALIGNMENT_64PB", Const, 3, ""},
+		{"MAP_ALIGNMENT_MASK", Const, 3, ""},
+		{"MAP_ALIGNMENT_SHIFT", Const, 3, ""},
+		{"MAP_ANON", Const, 0, ""},
+		{"MAP_ANONYMOUS", Const, 0, ""},
+		{"MAP_COPY", Const, 0, ""},
+		{"MAP_DENYWRITE", Const, 0, ""},
+		{"MAP_EXECUTABLE", Const, 0, ""},
+		{"MAP_FILE", Const, 0, ""},
+		{"MAP_FIXED", Const, 0, ""},
+		{"MAP_FLAGMASK", Const, 3, ""},
+		{"MAP_GROWSDOWN", Const, 0, ""},
+		{"MAP_HASSEMAPHORE", Const, 0, ""},
+		{"MAP_HUGETLB", Const, 0, ""},
+		{"MAP_INHERIT", Const, 3, ""},
+		{"MAP_INHERIT_COPY", Const, 3, ""},
+		{"MAP_INHERIT_DEFAULT", Const, 3, ""},
+		{"MAP_INHERIT_DONATE_COPY", Const, 3, ""},
+		{"MAP_INHERIT_NONE", Const, 3, ""},
+		{"MAP_INHERIT_SHARE", Const, 3, ""},
+		{"MAP_JIT", Const, 0, ""},
+		{"MAP_LOCKED", Const, 0, ""},
+		{"MAP_NOCACHE", Const, 0, ""},
+		{"MAP_NOCORE", Const, 1, ""},
+		{"MAP_NOEXTEND", Const, 0, ""},
+		{"MAP_NONBLOCK", Const, 0, ""},
+		{"MAP_NORESERVE", Const, 0, ""},
+		{"MAP_NOSYNC", Const, 1, ""},
+		{"MAP_POPULATE", Const, 0, ""},
+		{"MAP_PREFAULT_READ", Const, 1, ""},
+		{"MAP_PRIVATE", Const, 0, ""},
+		{"MAP_RENAME", Const, 0, ""},
+		{"MAP_RESERVED0080", Const, 0, ""},
+		{"MAP_RESERVED0100", Const, 1, ""},
+		{"MAP_SHARED", Const, 0, ""},
+		{"MAP_STACK", Const, 0, ""},
+		{"MAP_TRYFIXED", Const, 3, ""},
+		{"MAP_TYPE", Const, 0, ""},
+		{"MAP_WIRED", Const, 3, ""},
+		{"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4, ""},
+		{"MAXLEN_IFDESCR", Const, 0, ""},
+		{"MAXLEN_PHYSADDR", Const, 0, ""},
+		{"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0, ""},
+		{"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0, ""},
+		{"MAX_ADAPTER_NAME_LENGTH", Const, 0, ""},
+		{"MAX_COMPUTERNAME_LENGTH", Const, 0, ""},
+		{"MAX_INTERFACE_NAME_LEN", Const, 0, ""},
+		{"MAX_LONG_PATH", Const, 0, ""},
+		{"MAX_PATH", Const, 0, ""},
+		{"MAX_PROTOCOL_CHAIN", Const, 2, ""},
+		{"MCL_CURRENT", Const, 0, ""},
+		{"MCL_FUTURE", Const, 0, ""},
+		{"MNT_DETACH", Const, 0, ""},
+		{"MNT_EXPIRE", Const, 0, ""},
+		{"MNT_FORCE", Const, 0, ""},
+		{"MSG_BCAST", Const, 1, ""},
+		{"MSG_CMSG_CLOEXEC", Const, 0, ""},
+		{"MSG_COMPAT", Const, 0, ""},
+		{"MSG_CONFIRM", Const, 0, ""},
+		{"MSG_CONTROLMBUF", Const, 1, ""},
+		{"MSG_CTRUNC", Const, 0, ""},
+		{"MSG_DONTROUTE", Const, 0, ""},
+		{"MSG_DONTWAIT", Const, 0, ""},
+		{"MSG_EOF", Const, 0, ""},
+		{"MSG_EOR", Const, 0, ""},
+		{"MSG_ERRQUEUE", Const, 0, ""},
+		{"MSG_FASTOPEN", Const, 1, ""},
+		{"MSG_FIN", Const, 0, ""},
+		{"MSG_FLUSH", Const, 0, ""},
+		{"MSG_HAVEMORE", Const, 0, ""},
+		{"MSG_HOLD", Const, 0, ""},
+		{"MSG_IOVUSRSPACE", Const, 1, ""},
+		{"MSG_LENUSRSPACE", Const, 1, ""},
+		{"MSG_MCAST", Const, 1, ""},
+		{"MSG_MORE", Const, 0, ""},
+		{"MSG_NAMEMBUF", Const, 1, ""},
+		{"MSG_NBIO", Const, 0, ""},
+		{"MSG_NEEDSA", Const, 0, ""},
+		{"MSG_NOSIGNAL", Const, 0, ""},
+		{"MSG_NOTIFICATION", Const, 0, ""},
+		{"MSG_OOB", Const, 0, ""},
+		{"MSG_PEEK", Const, 0, ""},
+		{"MSG_PROXY", Const, 0, ""},
+		{"MSG_RCVMORE", Const, 0, ""},
+		{"MSG_RST", Const, 0, ""},
+		{"MSG_SEND", Const, 0, ""},
+		{"MSG_SYN", Const, 0, ""},
+		{"MSG_TRUNC", Const, 0, ""},
+		{"MSG_TRYHARD", Const, 0, ""},
+		{"MSG_USERFLAGS", Const, 1, ""},
+		{"MSG_WAITALL", Const, 0, ""},
+		{"MSG_WAITFORONE", Const, 0, ""},
+		{"MSG_WAITSTREAM", Const, 0, ""},
+		{"MS_ACTIVE", Const, 0, ""},
+		{"MS_ASYNC", Const, 0, ""},
+		{"MS_BIND", Const, 0, ""},
+		{"MS_DEACTIVATE", Const, 0, ""},
+		{"MS_DIRSYNC", Const, 0, ""},
+		{"MS_INVALIDATE", Const, 0, ""},
+		{"MS_I_VERSION", Const, 0, ""},
+		{"MS_KERNMOUNT", Const, 0, ""},
+		{"MS_KILLPAGES", Const, 0, ""},
+		{"MS_MANDLOCK", Const, 0, ""},
+		{"MS_MGC_MSK", Const, 0, ""},
+		{"MS_MGC_VAL", Const, 0, ""},
+		{"MS_MOVE", Const, 0, ""},
+		{"MS_NOATIME", Const, 0, ""},
+		{"MS_NODEV", Const, 0, ""},
+		{"MS_NODIRATIME", Const, 0, ""},
+		{"MS_NOEXEC", Const, 0, ""},
+		{"MS_NOSUID", Const, 0, ""},
+		{"MS_NOUSER", Const, 0, ""},
+		{"MS_POSIXACL", Const, 0, ""},
+		{"MS_PRIVATE", Const, 0, ""},
+		{"MS_RDONLY", Const, 0, ""},
+		{"MS_REC", Const, 0, ""},
+		{"MS_RELATIME", Const, 0, ""},
+		{"MS_REMOUNT", Const, 0, ""},
+		{"MS_RMT_MASK", Const, 0, ""},
+		{"MS_SHARED", Const, 0, ""},
+		{"MS_SILENT", Const, 0, ""},
+		{"MS_SLAVE", Const, 0, ""},
+		{"MS_STRICTATIME", Const, 0, ""},
+		{"MS_SYNC", Const, 0, ""},
+		{"MS_SYNCHRONOUS", Const, 0, ""},
+		{"MS_UNBINDABLE", Const, 0, ""},
+		{"Madvise", Func, 0, "func(b []byte, advice int) (err error)"},
+		{"MapViewOfFile", Func, 0, ""},
+		{"MaxTokenInfoClass", Const, 0, ""},
+		{"Mclpool", Type, 2, ""},
+		{"Mclpool.Alive", Field, 2, ""},
+		{"Mclpool.Cwm", Field, 2, ""},
+		{"Mclpool.Grown", Field, 2, ""},
+		{"Mclpool.Hwm", Field, 2, ""},
+		{"Mclpool.Lwm", Field, 2, ""},
+		{"MibIfRow", Type, 0, ""},
+		{"MibIfRow.AdminStatus", Field, 0, ""},
+		{"MibIfRow.Descr", Field, 0, ""},
+		{"MibIfRow.DescrLen", Field, 0, ""},
+		{"MibIfRow.InDiscards", Field, 0, ""},
+		{"MibIfRow.InErrors", Field, 0, ""},
+		{"MibIfRow.InNUcastPkts", Field, 0, ""},
+		{"MibIfRow.InOctets", Field, 0, ""},
+		{"MibIfRow.InUcastPkts", Field, 0, ""},
+		{"MibIfRow.InUnknownProtos", Field, 0, ""},
+		{"MibIfRow.Index", Field, 0, ""},
+		{"MibIfRow.LastChange", Field, 0, ""},
+		{"MibIfRow.Mtu", Field, 0, ""},
+		{"MibIfRow.Name", Field, 0, ""},
+		{"MibIfRow.OperStatus", Field, 0, ""},
+		{"MibIfRow.OutDiscards", Field, 0, ""},
+		{"MibIfRow.OutErrors", Field, 0, ""},
+		{"MibIfRow.OutNUcastPkts", Field, 0, ""},
+		{"MibIfRow.OutOctets", Field, 0, ""},
+		{"MibIfRow.OutQLen", Field, 0, ""},
+		{"MibIfRow.OutUcastPkts", Field, 0, ""},
+		{"MibIfRow.PhysAddr", Field, 0, ""},
+		{"MibIfRow.PhysAddrLen", Field, 0, ""},
+		{"MibIfRow.Speed", Field, 0, ""},
+		{"MibIfRow.Type", Field, 0, ""},
+		{"Mkdir", Func, 0, "func(path string, mode uint32) (err error)"},
+		{"Mkdirat", Func, 0, "func(dirfd int, path string, mode uint32) (err error)"},
+		{"Mkfifo", Func, 0, "func(path string, mode uint32) (err error)"},
+		{"Mknod", Func, 0, "func(path string, mode uint32, dev int) (err error)"},
+		{"Mknodat", Func, 0, "func(dirfd int, path string, mode uint32, dev int) (err error)"},
+		{"Mlock", Func, 0, "func(b []byte) (err error)"},
+		{"Mlockall", Func, 0, "func(flags int) (err error)"},
+		{"Mmap", Func, 0, "func(fd int, offset int64, length int, prot int, flags int) (data []byte, err error)"},
+		{"Mount", Func, 0, "func(source string, target string, fstype string, flags uintptr, data string) (err error)"},
+		{"MoveFile", Func, 0, ""},
+		{"Mprotect", Func, 0, "func(b []byte, prot int) (err error)"},
+		{"Msghdr", Type, 0, ""},
+		{"Msghdr.Control", Field, 0, ""},
+		{"Msghdr.Controllen", Field, 0, ""},
+		{"Msghdr.Flags", Field, 0, ""},
+		{"Msghdr.Iov", Field, 0, ""},
+		{"Msghdr.Iovlen", Field, 0, ""},
+		{"Msghdr.Name", Field, 0, ""},
+		{"Msghdr.Namelen", Field, 0, ""},
+		{"Msghdr.Pad_cgo_0", Field, 0, ""},
+		{"Msghdr.Pad_cgo_1", Field, 0, ""},
+		{"Munlock", Func, 0, "func(b []byte) (err error)"},
+		{"Munlockall", Func, 0, "func() (err error)"},
+		{"Munmap", Func, 0, "func(b []byte) (err error)"},
+		{"MustLoadDLL", Func, 0, ""},
+		{"NAME_MAX", Const, 0, ""},
+		{"NETLINK_ADD_MEMBERSHIP", Const, 0, ""},
+		{"NETLINK_AUDIT", Const, 0, ""},
+		{"NETLINK_BROADCAST_ERROR", Const, 0, ""},
+		{"NETLINK_CONNECTOR", Const, 0, ""},
+		{"NETLINK_DNRTMSG", Const, 0, ""},
+		{"NETLINK_DROP_MEMBERSHIP", Const, 0, ""},
+		{"NETLINK_ECRYPTFS", Const, 0, ""},
+		{"NETLINK_FIB_LOOKUP", Const, 0, ""},
+		{"NETLINK_FIREWALL", Const, 0, ""},
+		{"NETLINK_GENERIC", Const, 0, ""},
+		{"NETLINK_INET_DIAG", Const, 0, ""},
+		{"NETLINK_IP6_FW", Const, 0, ""},
+		{"NETLINK_ISCSI", Const, 0, ""},
+		{"NETLINK_KOBJECT_UEVENT", Const, 0, ""},
+		{"NETLINK_NETFILTER", Const, 0, ""},
+		{"NETLINK_NFLOG", Const, 0, ""},
+		{"NETLINK_NO_ENOBUFS", Const, 0, ""},
+		{"NETLINK_PKTINFO", Const, 0, ""},
+		{"NETLINK_RDMA", Const, 0, ""},
+		{"NETLINK_ROUTE", Const, 0, ""},
+		{"NETLINK_SCSITRANSPORT", Const, 0, ""},
+		{"NETLINK_SELINUX", Const, 0, ""},
+		{"NETLINK_UNUSED", Const, 0, ""},
+		{"NETLINK_USERSOCK", Const, 0, ""},
+		{"NETLINK_XFRM", Const, 0, ""},
+		{"NET_RT_DUMP", Const, 0, ""},
+		{"NET_RT_DUMP2", Const, 0, ""},
+		{"NET_RT_FLAGS", Const, 0, ""},
+		{"NET_RT_IFLIST", Const, 0, ""},
+		{"NET_RT_IFLIST2", Const, 0, ""},
+		{"NET_RT_IFLISTL", Const, 1, ""},
+		{"NET_RT_IFMALIST", Const, 0, ""},
+		{"NET_RT_MAXID", Const, 0, ""},
+		{"NET_RT_OIFLIST", Const, 1, ""},
+		{"NET_RT_OOIFLIST", Const, 1, ""},
+		{"NET_RT_STAT", Const, 0, ""},
+		{"NET_RT_STATS", Const, 1, ""},
+		{"NET_RT_TABLE", Const, 1, ""},
+		{"NET_RT_TRASH", Const, 0, ""},
+		{"NLA_ALIGNTO", Const, 0, ""},
+		{"NLA_F_NESTED", Const, 0, ""},
+		{"NLA_F_NET_BYTEORDER", Const, 0, ""},
+		{"NLA_HDRLEN", Const, 0, ""},
+		{"NLMSG_ALIGNTO", Const, 0, ""},
+		{"NLMSG_DONE", Const, 0, ""},
+		{"NLMSG_ERROR", Const, 0, ""},
+		{"NLMSG_HDRLEN", Const, 0, ""},
+		{"NLMSG_MIN_TYPE", Const, 0, ""},
+		{"NLMSG_NOOP", Const, 0, ""},
+		{"NLMSG_OVERRUN", Const, 0, ""},
+		{"NLM_F_ACK", Const, 0, ""},
+		{"NLM_F_APPEND", Const, 0, ""},
+		{"NLM_F_ATOMIC", Const, 0, ""},
+		{"NLM_F_CREATE", Const, 0, ""},
+		{"NLM_F_DUMP", Const, 0, ""},
+		{"NLM_F_ECHO", Const, 0, ""},
+		{"NLM_F_EXCL", Const, 0, ""},
+		{"NLM_F_MATCH", Const, 0, ""},
+		{"NLM_F_MULTI", Const, 0, ""},
+		{"NLM_F_REPLACE", Const, 0, ""},
+		{"NLM_F_REQUEST", Const, 0, ""},
+		{"NLM_F_ROOT", Const, 0, ""},
+		{"NOFLSH", Const, 0, ""},
+		{"NOTE_ABSOLUTE", Const, 0, ""},
+		{"NOTE_ATTRIB", Const, 0, ""},
+		{"NOTE_BACKGROUND", Const, 16, ""},
+		{"NOTE_CHILD", Const, 0, ""},
+		{"NOTE_CRITICAL", Const, 16, ""},
+		{"NOTE_DELETE", Const, 0, ""},
+		{"NOTE_EOF", Const, 1, ""},
+		{"NOTE_EXEC", Const, 0, ""},
+		{"NOTE_EXIT", Const, 0, ""},
+		{"NOTE_EXITSTATUS", Const, 0, ""},
+		{"NOTE_EXIT_CSERROR", Const, 16, ""},
+		{"NOTE_EXIT_DECRYPTFAIL", Const, 16, ""},
+		{"NOTE_EXIT_DETAIL", Const, 16, ""},
+		{"NOTE_EXIT_DETAIL_MASK", Const, 16, ""},
+		{"NOTE_EXIT_MEMORY", Const, 16, ""},
+		{"NOTE_EXIT_REPARENTED", Const, 16, ""},
+		{"NOTE_EXTEND", Const, 0, ""},
+		{"NOTE_FFAND", Const, 0, ""},
+		{"NOTE_FFCOPY", Const, 0, ""},
+		{"NOTE_FFCTRLMASK", Const, 0, ""},
+		{"NOTE_FFLAGSMASK", Const, 0, ""},
+		{"NOTE_FFNOP", Const, 0, ""},
+		{"NOTE_FFOR", Const, 0, ""},
+		{"NOTE_FORK", Const, 0, ""},
+		{"NOTE_LEEWAY", Const, 16, ""},
+		{"NOTE_LINK", Const, 0, ""},
+		{"NOTE_LOWAT", Const, 0, ""},
+		{"NOTE_NONE", Const, 0, ""},
+		{"NOTE_NSECONDS", Const, 0, ""},
+		{"NOTE_PCTRLMASK", Const, 0, ""},
+		{"NOTE_PDATAMASK", Const, 0, ""},
+		{"NOTE_REAP", Const, 0, ""},
+		{"NOTE_RENAME", Const, 0, ""},
+		{"NOTE_RESOURCEEND", Const, 0, ""},
+		{"NOTE_REVOKE", Const, 0, ""},
+		{"NOTE_SECONDS", Const, 0, ""},
+		{"NOTE_SIGNAL", Const, 0, ""},
+		{"NOTE_TRACK", Const, 0, ""},
+		{"NOTE_TRACKERR", Const, 0, ""},
+		{"NOTE_TRIGGER", Const, 0, ""},
+		{"NOTE_TRUNCATE", Const, 1, ""},
+		{"NOTE_USECONDS", Const, 0, ""},
+		{"NOTE_VM_ERROR", Const, 0, ""},
+		{"NOTE_VM_PRESSURE", Const, 0, ""},
+		{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0, ""},
+		{"NOTE_VM_PRESSURE_TERMINATE", Const, 0, ""},
+		{"NOTE_WRITE", Const, 0, ""},
+		{"NameCanonical", Const, 0, ""},
+		{"NameCanonicalEx", Const, 0, ""},
+		{"NameDisplay", Const, 0, ""},
+		{"NameDnsDomain", Const, 0, ""},
+		{"NameFullyQualifiedDN", Const, 0, ""},
+		{"NameSamCompatible", Const, 0, ""},
+		{"NameServicePrincipal", Const, 0, ""},
+		{"NameUniqueId", Const, 0, ""},
+		{"NameUnknown", Const, 0, ""},
+		{"NameUserPrincipal", Const, 0, ""},
+		{"Nanosleep", Func, 0, "func(time *Timespec, leftover *Timespec) (err error)"},
+		{"NetApiBufferFree", Func, 0, ""},
+		{"NetGetJoinInformation", Func, 2, ""},
+		{"NetSetupDomainName", Const, 2, ""},
+		{"NetSetupUnjoined", Const, 2, ""},
+		{"NetSetupUnknownStatus", Const, 2, ""},
+		{"NetSetupWorkgroupName", Const, 2, ""},
+		{"NetUserGetInfo", Func, 0, ""},
+		{"NetlinkMessage", Type, 0, ""},
+		{"NetlinkMessage.Data", Field, 0, ""},
+		{"NetlinkMessage.Header", Field, 0, ""},
+		{"NetlinkRIB", Func, 0, "func(proto int, family int) ([]byte, error)"},
+		{"NetlinkRouteAttr", Type, 0, ""},
+		{"NetlinkRouteAttr.Attr", Field, 0, ""},
+		{"NetlinkRouteAttr.Value", Field, 0, ""},
+		{"NetlinkRouteRequest", Type, 0, ""},
+		{"NetlinkRouteRequest.Data", Field, 0, ""},
+		{"NetlinkRouteRequest.Header", Field, 0, ""},
+		{"NewCallback", Func, 0, ""},
+		{"NewCallbackCDecl", Func, 3, ""},
+		{"NewLazyDLL", Func, 0, ""},
+		{"NlAttr", Type, 0, ""},
+		{"NlAttr.Len", Field, 0, ""},
+		{"NlAttr.Type", Field, 0, ""},
+		{"NlMsgerr", Type, 0, ""},
+		{"NlMsgerr.Error", Field, 0, ""},
+		{"NlMsgerr.Msg", Field, 0, ""},
+		{"NlMsghdr", Type, 0, ""},
+		{"NlMsghdr.Flags", Field, 0, ""},
+		{"NlMsghdr.Len", Field, 0, ""},
+		{"NlMsghdr.Pid", Field, 0, ""},
+		{"NlMsghdr.Seq", Field, 0, ""},
+		{"NlMsghdr.Type", Field, 0, ""},
+		{"NsecToFiletime", Func, 0, ""},
+		{"NsecToTimespec", Func, 0, "func(nsec int64) Timespec"},
+		{"NsecToTimeval", Func, 0, "func(nsec int64) Timeval"},
+		{"Ntohs", Func, 0, ""},
+		{"OCRNL", Const, 0, ""},
+		{"OFDEL", Const, 0, ""},
+		{"OFILL", Const, 0, ""},
+		{"OFIOGETBMAP", Const, 1, ""},
+		{"OID_PKIX_KP_SERVER_AUTH", Var, 0, ""},
+		{"OID_SERVER_GATED_CRYPTO", Var, 0, ""},
+		{"OID_SGC_NETSCAPE", Var, 0, ""},
+		{"OLCUC", Const, 0, ""},
+		{"ONLCR", Const, 0, ""},
+		{"ONLRET", Const, 0, ""},
+		{"ONOCR", Const, 0, ""},
+		{"ONOEOT", Const, 1, ""},
+		{"OPEN_ALWAYS", Const, 0, ""},
+		{"OPEN_EXISTING", Const, 0, ""},
+		{"OPOST", Const, 0, ""},
+		{"O_ACCMODE", Const, 0, ""},
+		{"O_ALERT", Const, 0, ""},
+		{"O_ALT_IO", Const, 1, ""},
+		{"O_APPEND", Const, 0, ""},
+		{"O_ASYNC", Const, 0, ""},
+		{"O_CLOEXEC", Const, 0, ""},
+		{"O_CREAT", Const, 0, ""},
+		{"O_DIRECT", Const, 0, ""},
+		{"O_DIRECTORY", Const, 0, ""},
+		{"O_DP_GETRAWENCRYPTED", Const, 16, ""},
+		{"O_DSYNC", Const, 0, ""},
+		{"O_EVTONLY", Const, 0, ""},
+		{"O_EXCL", Const, 0, ""},
+		{"O_EXEC", Const, 0, ""},
+		{"O_EXLOCK", Const, 0, ""},
+		{"O_FSYNC", Const, 0, ""},
+		{"O_LARGEFILE", Const, 0, ""},
+		{"O_NDELAY", Const, 0, ""},
+		{"O_NOATIME", Const, 0, ""},
+		{"O_NOCTTY", Const, 0, ""},
+		{"O_NOFOLLOW", Const, 0, ""},
+		{"O_NONBLOCK", Const, 0, ""},
+		{"O_NOSIGPIPE", Const, 1, ""},
+		{"O_POPUP", Const, 0, ""},
+		{"O_RDONLY", Const, 0, ""},
+		{"O_RDWR", Const, 0, ""},
+		{"O_RSYNC", Const, 0, ""},
+		{"O_SHLOCK", Const, 0, ""},
+		{"O_SYMLINK", Const, 0, ""},
+		{"O_SYNC", Const, 0, ""},
+		{"O_TRUNC", Const, 0, ""},
+		{"O_TTY_INIT", Const, 0, ""},
+		{"O_WRONLY", Const, 0, ""},
+		{"Open", Func, 0, "func(path string, mode int, perm uint32) (fd int, err error)"},
+		{"OpenCurrentProcessToken", Func, 0, ""},
+		{"OpenProcess", Func, 0, ""},
+		{"OpenProcessToken", Func, 0, ""},
+		{"Openat", Func, 0, "func(dirfd int, path string, flags int, mode uint32) (fd int, err error)"},
+		{"Overlapped", Type, 0, ""},
+		{"Overlapped.HEvent", Field, 0, ""},
+		{"Overlapped.Internal", Field, 0, ""},
+		{"Overlapped.InternalHigh", Field, 0, ""},
+		{"Overlapped.Offset", Field, 0, ""},
+		{"Overlapped.OffsetHigh", Field, 0, ""},
+		{"PACKET_ADD_MEMBERSHIP", Const, 0, ""},
+		{"PACKET_BROADCAST", Const, 0, ""},
+		{"PACKET_DROP_MEMBERSHIP", Const, 0, ""},
+		{"PACKET_FASTROUTE", Const, 0, ""},
+		{"PACKET_HOST", Const, 0, ""},
+		{"PACKET_LOOPBACK", Const, 0, ""},
+		{"PACKET_MR_ALLMULTI", Const, 0, ""},
+		{"PACKET_MR_MULTICAST", Const, 0, ""},
+		{"PACKET_MR_PROMISC", Const, 0, ""},
+		{"PACKET_MULTICAST", Const, 0, ""},
+		{"PACKET_OTHERHOST", Const, 0, ""},
+		{"PACKET_OUTGOING", Const, 0, ""},
+		{"PACKET_RECV_OUTPUT", Const, 0, ""},
+		{"PACKET_RX_RING", Const, 0, ""},
+		{"PACKET_STATISTICS", Const, 0, ""},
+		{"PAGE_EXECUTE_READ", Const, 0, ""},
+		{"PAGE_EXECUTE_READWRITE", Const, 0, ""},
+		{"PAGE_EXECUTE_WRITECOPY", Const, 0, ""},
+		{"PAGE_READONLY", Const, 0, ""},
+		{"PAGE_READWRITE", Const, 0, ""},
+		{"PAGE_WRITECOPY", Const, 0, ""},
+		{"PARENB", Const, 0, ""},
+		{"PARMRK", Const, 0, ""},
+		{"PARODD", Const, 0, ""},
+		{"PENDIN", Const, 0, ""},
+		{"PFL_HIDDEN", Const, 2, ""},
+		{"PFL_MATCHES_PROTOCOL_ZERO", Const, 2, ""},
+		{"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2, ""},
+		{"PFL_NETWORKDIRECT_PROVIDER", Const, 2, ""},
+		{"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2, ""},
+		{"PF_FLUSH", Const, 1, ""},
+		{"PKCS_7_ASN_ENCODING", Const, 0, ""},
+		{"PMC5_PIPELINE_FLUSH", Const, 1, ""},
+		{"PRIO_PGRP", Const, 2, ""},
+		{"PRIO_PROCESS", Const, 2, ""},
+		{"PRIO_USER", Const, 2, ""},
+		{"PRI_IOFLUSH", Const, 1, ""},
+		{"PROCESS_QUERY_INFORMATION", Const, 0, ""},
+		{"PROCESS_TERMINATE", Const, 2, ""},
+		{"PROT_EXEC", Const, 0, ""},
+		{"PROT_GROWSDOWN", Const, 0, ""},
+		{"PROT_GROWSUP", Const, 0, ""},
+		{"PROT_NONE", Const, 0, ""},
+		{"PROT_READ", Const, 0, ""},
+		{"PROT_WRITE", Const, 0, ""},
+		{"PROV_DH_SCHANNEL", Const, 0, ""},
+		{"PROV_DSS", Const, 0, ""},
+		{"PROV_DSS_DH", Const, 0, ""},
+		{"PROV_EC_ECDSA_FULL", Const, 0, ""},
+		{"PROV_EC_ECDSA_SIG", Const, 0, ""},
+		{"PROV_EC_ECNRA_FULL", Const, 0, ""},
+		{"PROV_EC_ECNRA_SIG", Const, 0, ""},
+		{"PROV_FORTEZZA", Const, 0, ""},
+		{"PROV_INTEL_SEC", Const, 0, ""},
+		{"PROV_MS_EXCHANGE", Const, 0, ""},
+		{"PROV_REPLACE_OWF", Const, 0, ""},
+		{"PROV_RNG", Const, 0, ""},
+		{"PROV_RSA_AES", Const, 0, ""},
+		{"PROV_RSA_FULL", Const, 0, ""},
+		{"PROV_RSA_SCHANNEL", Const, 0, ""},
+		{"PROV_RSA_SIG", Const, 0, ""},
+		{"PROV_SPYRUS_LYNKS", Const, 0, ""},
+		{"PROV_SSL", Const, 0, ""},
+		{"PR_CAPBSET_DROP", Const, 0, ""},
+		{"PR_CAPBSET_READ", Const, 0, ""},
+		{"PR_CLEAR_SECCOMP_FILTER", Const, 0, ""},
+		{"PR_ENDIAN_BIG", Const, 0, ""},
+		{"PR_ENDIAN_LITTLE", Const, 0, ""},
+		{"PR_ENDIAN_PPC_LITTLE", Const, 0, ""},
+		{"PR_FPEMU_NOPRINT", Const, 0, ""},
+		{"PR_FPEMU_SIGFPE", Const, 0, ""},
+		{"PR_FP_EXC_ASYNC", Const, 0, ""},
+		{"PR_FP_EXC_DISABLED", Const, 0, ""},
+		{"PR_FP_EXC_DIV", Const, 0, ""},
+		{"PR_FP_EXC_INV", Const, 0, ""},
+		{"PR_FP_EXC_NONRECOV", Const, 0, ""},
+		{"PR_FP_EXC_OVF", Const, 0, ""},
+		{"PR_FP_EXC_PRECISE", Const, 0, ""},
+		{"PR_FP_EXC_RES", Const, 0, ""},
+		{"PR_FP_EXC_SW_ENABLE", Const, 0, ""},
+		{"PR_FP_EXC_UND", Const, 0, ""},
+		{"PR_GET_DUMPABLE", Const, 0, ""},
+		{"PR_GET_ENDIAN", Const, 0, ""},
+		{"PR_GET_FPEMU", Const, 0, ""},
+		{"PR_GET_FPEXC", Const, 0, ""},
+		{"PR_GET_KEEPCAPS", Const, 0, ""},
+		{"PR_GET_NAME", Const, 0, ""},
+		{"PR_GET_PDEATHSIG", Const, 0, ""},
+		{"PR_GET_SECCOMP", Const, 0, ""},
+		{"PR_GET_SECCOMP_FILTER", Const, 0, ""},
+		{"PR_GET_SECUREBITS", Const, 0, ""},
+		{"PR_GET_TIMERSLACK", Const, 0, ""},
+		{"PR_GET_TIMING", Const, 0, ""},
+		{"PR_GET_TSC", Const, 0, ""},
+		{"PR_GET_UNALIGN", Const, 0, ""},
+		{"PR_MCE_KILL", Const, 0, ""},
+		{"PR_MCE_KILL_CLEAR", Const, 0, ""},
+		{"PR_MCE_KILL_DEFAULT", Const, 0, ""},
+		{"PR_MCE_KILL_EARLY", Const, 0, ""},
+		{"PR_MCE_KILL_GET", Const, 0, ""},
+		{"PR_MCE_KILL_LATE", Const, 0, ""},
+		{"PR_MCE_KILL_SET", Const, 0, ""},
+		{"PR_SECCOMP_FILTER_EVENT", Const, 0, ""},
+		{"PR_SECCOMP_FILTER_SYSCALL", Const, 0, ""},
+		{"PR_SET_DUMPABLE", Const, 0, ""},
+		{"PR_SET_ENDIAN", Const, 0, ""},
+		{"PR_SET_FPEMU", Const, 0, ""},
+		{"PR_SET_FPEXC", Const, 0, ""},
+		{"PR_SET_KEEPCAPS", Const, 0, ""},
+		{"PR_SET_NAME", Const, 0, ""},
+		{"PR_SET_PDEATHSIG", Const, 0, ""},
+		{"PR_SET_PTRACER", Const, 0, ""},
+		{"PR_SET_SECCOMP", Const, 0, ""},
+		{"PR_SET_SECCOMP_FILTER", Const, 0, ""},
+		{"PR_SET_SECUREBITS", Const, 0, ""},
+		{"PR_SET_TIMERSLACK", Const, 0, ""},
+		{"PR_SET_TIMING", Const, 0, ""},
+		{"PR_SET_TSC", Const, 0, ""},
+		{"PR_SET_UNALIGN", Const, 0, ""},
+		{"PR_TASK_PERF_EVENTS_DISABLE", Const, 0, ""},
+		{"PR_TASK_PERF_EVENTS_ENABLE", Const, 0, ""},
+		{"PR_TIMING_STATISTICAL", Const, 0, ""},
+		{"PR_TIMING_TIMESTAMP", Const, 0, ""},
+		{"PR_TSC_ENABLE", Const, 0, ""},
+		{"PR_TSC_SIGSEGV", Const, 0, ""},
+		{"PR_UNALIGN_NOPRINT", Const, 0, ""},
+		{"PR_UNALIGN_SIGBUS", Const, 0, ""},
+		{"PTRACE_ARCH_PRCTL", Const, 0, ""},
+		{"PTRACE_ATTACH", Const, 0, ""},
+		{"PTRACE_CONT", Const, 0, ""},
+		{"PTRACE_DETACH", Const, 0, ""},
+		{"PTRACE_EVENT_CLONE", Const, 0, ""},
+		{"PTRACE_EVENT_EXEC", Const, 0, ""},
+		{"PTRACE_EVENT_EXIT", Const, 0, ""},
+		{"PTRACE_EVENT_FORK", Const, 0, ""},
+		{"PTRACE_EVENT_VFORK", Const, 0, ""},
+		{"PTRACE_EVENT_VFORK_DONE", Const, 0, ""},
+		{"PTRACE_GETCRUNCHREGS", Const, 0, ""},
+		{"PTRACE_GETEVENTMSG", Const, 0, ""},
+		{"PTRACE_GETFPREGS", Const, 0, ""},
+		{"PTRACE_GETFPXREGS", Const, 0, ""},
+		{"PTRACE_GETHBPREGS", Const, 0, ""},
+		{"PTRACE_GETREGS", Const, 0, ""},
+		{"PTRACE_GETREGSET", Const, 0, ""},
+		{"PTRACE_GETSIGINFO", Const, 0, ""},
+		{"PTRACE_GETVFPREGS", Const, 0, ""},
+		{"PTRACE_GETWMMXREGS", Const, 0, ""},
+		{"PTRACE_GET_THREAD_AREA", Const, 0, ""},
+		{"PTRACE_KILL", Const, 0, ""},
+		{"PTRACE_OLDSETOPTIONS", Const, 0, ""},
+		{"PTRACE_O_MASK", Const, 0, ""},
+		{"PTRACE_O_TRACECLONE", Const, 0, ""},
+		{"PTRACE_O_TRACEEXEC", Const, 0, ""},
+		{"PTRACE_O_TRACEEXIT", Const, 0, ""},
+		{"PTRACE_O_TRACEFORK", Const, 0, ""},
+		{"PTRACE_O_TRACESYSGOOD", Const, 0, ""},
+		{"PTRACE_O_TRACEVFORK", Const, 0, ""},
+		{"PTRACE_O_TRACEVFORKDONE", Const, 0, ""},
+		{"PTRACE_PEEKDATA", Const, 0, ""},
+		{"PTRACE_PEEKTEXT", Const, 0, ""},
+		{"PTRACE_PEEKUSR", Const, 0, ""},
+		{"PTRACE_POKEDATA", Const, 0, ""},
+		{"PTRACE_POKETEXT", Const, 0, ""},
+		{"PTRACE_POKEUSR", Const, 0, ""},
+		{"PTRACE_SETCRUNCHREGS", Const, 0, ""},
+		{"PTRACE_SETFPREGS", Const, 0, ""},
+		{"PTRACE_SETFPXREGS", Const, 0, ""},
+		{"PTRACE_SETHBPREGS", Const, 0, ""},
+		{"PTRACE_SETOPTIONS", Const, 0, ""},
+		{"PTRACE_SETREGS", Const, 0, ""},
+		{"PTRACE_SETREGSET", Const, 0, ""},
+		{"PTRACE_SETSIGINFO", Const, 0, ""},
+		{"PTRACE_SETVFPREGS", Const, 0, ""},
+		{"PTRACE_SETWMMXREGS", Const, 0, ""},
+		{"PTRACE_SET_SYSCALL", Const, 0, ""},
+		{"PTRACE_SET_THREAD_AREA", Const, 0, ""},
+		{"PTRACE_SINGLEBLOCK", Const, 0, ""},
+		{"PTRACE_SINGLESTEP", Const, 0, ""},
+		{"PTRACE_SYSCALL", Const, 0, ""},
+		{"PTRACE_SYSEMU", Const, 0, ""},
+		{"PTRACE_SYSEMU_SINGLESTEP", Const, 0, ""},
+		{"PTRACE_TRACEME", Const, 0, ""},
+		{"PT_ATTACH", Const, 0, ""},
+		{"PT_ATTACHEXC", Const, 0, ""},
+		{"PT_CONTINUE", Const, 0, ""},
+		{"PT_DATA_ADDR", Const, 0, ""},
+		{"PT_DENY_ATTACH", Const, 0, ""},
+		{"PT_DETACH", Const, 0, ""},
+		{"PT_FIRSTMACH", Const, 0, ""},
+		{"PT_FORCEQUOTA", Const, 0, ""},
+		{"PT_KILL", Const, 0, ""},
+		{"PT_MASK", Const, 1, ""},
+		{"PT_READ_D", Const, 0, ""},
+		{"PT_READ_I", Const, 0, ""},
+		{"PT_READ_U", Const, 0, ""},
+		{"PT_SIGEXC", Const, 0, ""},
+		{"PT_STEP", Const, 0, ""},
+		{"PT_TEXT_ADDR", Const, 0, ""},
+		{"PT_TEXT_END_ADDR", Const, 0, ""},
+		{"PT_THUPDATE", Const, 0, ""},
+		{"PT_TRACE_ME", Const, 0, ""},
+		{"PT_WRITE_D", Const, 0, ""},
+		{"PT_WRITE_I", Const, 0, ""},
+		{"PT_WRITE_U", Const, 0, ""},
+		{"ParseDirent", Func, 0, "func(buf []byte, max int, names []string) (consumed int, count int, newnames []string)"},
+		{"ParseNetlinkMessage", Func, 0, "func(b []byte) ([]NetlinkMessage, error)"},
+		{"ParseNetlinkRouteAttr", Func, 0, "func(m *NetlinkMessage) ([]NetlinkRouteAttr, error)"},
+		{"ParseRoutingMessage", Func, 0, ""},
+		{"ParseRoutingSockaddr", Func, 0, ""},
+		{"ParseSocketControlMessage", Func, 0, "func(b []byte) ([]SocketControlMessage, error)"},
+		{"ParseUnixCredentials", Func, 0, "func(m *SocketControlMessage) (*Ucred, error)"},
+		{"ParseUnixRights", Func, 0, "func(m *SocketControlMessage) ([]int, error)"},
+		{"PathMax", Const, 0, ""},
+		{"Pathconf", Func, 0, ""},
+		{"Pause", Func, 0, "func() (err error)"},
+		{"Pipe", Func, 0, "func(p []int) error"},
+		{"Pipe2", Func, 1, "func(p []int, flags int) error"},
+		{"PivotRoot", Func, 0, "func(newroot string, putold string) (err error)"},
+		{"Pointer", Type, 11, ""},
+		{"PostQueuedCompletionStatus", Func, 0, ""},
+		{"Pread", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
+		{"Proc", Type, 0, ""},
+		{"Proc.Dll", Field, 0, ""},
+		{"Proc.Name", Field, 0, ""},
+		{"ProcAttr", Type, 0, ""},
+		{"ProcAttr.Dir", Field, 0, ""},
+		{"ProcAttr.Env", Field, 0, ""},
+		{"ProcAttr.Files", Field, 0, ""},
+		{"ProcAttr.Sys", Field, 0, ""},
+		{"Process32First", Func, 4, ""},
+		{"Process32Next", Func, 4, ""},
+		{"ProcessEntry32", Type, 4, ""},
+		{"ProcessEntry32.DefaultHeapID", Field, 4, ""},
+		{"ProcessEntry32.ExeFile", Field, 4, ""},
+		{"ProcessEntry32.Flags", Field, 4, ""},
+		{"ProcessEntry32.ModuleID", Field, 4, ""},
+		{"ProcessEntry32.ParentProcessID", Field, 4, ""},
+		{"ProcessEntry32.PriClassBase", Field, 4, ""},
+		{"ProcessEntry32.ProcessID", Field, 4, ""},
+		{"ProcessEntry32.Size", Field, 4, ""},
+		{"ProcessEntry32.Threads", Field, 4, ""},
+		{"ProcessEntry32.Usage", Field, 4, ""},
+		{"ProcessInformation", Type, 0, ""},
+		{"ProcessInformation.Process", Field, 0, ""},
+		{"ProcessInformation.ProcessId", Field, 0, ""},
+		{"ProcessInformation.Thread", Field, 0, ""},
+		{"ProcessInformation.ThreadId", Field, 0, ""},
+		{"Protoent", Type, 0, ""},
+		{"Protoent.Aliases", Field, 0, ""},
+		{"Protoent.Name", Field, 0, ""},
+		{"Protoent.Proto", Field, 0, ""},
+		{"PtraceAttach", Func, 0, "func(pid int) (err error)"},
+		{"PtraceCont", Func, 0, "func(pid int, signal int) (err error)"},
+		{"PtraceDetach", Func, 0, "func(pid int) (err error)"},
+		{"PtraceGetEventMsg", Func, 0, "func(pid int) (msg uint, err error)"},
+		{"PtraceGetRegs", Func, 0, "func(pid int, regsout *PtraceRegs) (err error)"},
+		{"PtracePeekData", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
+		{"PtracePeekText", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
+		{"PtracePokeData", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
+		{"PtracePokeText", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
+		{"PtraceRegs", Type, 0, ""},
+		{"PtraceRegs.Cs", Field, 0, ""},
+		{"PtraceRegs.Ds", Field, 0, ""},
+		{"PtraceRegs.Eax", Field, 0, ""},
+		{"PtraceRegs.Ebp", Field, 0, ""},
+		{"PtraceRegs.Ebx", Field, 0, ""},
+		{"PtraceRegs.Ecx", Field, 0, ""},
+		{"PtraceRegs.Edi", Field, 0, ""},
+		{"PtraceRegs.Edx", Field, 0, ""},
+		{"PtraceRegs.Eflags", Field, 0, ""},
+		{"PtraceRegs.Eip", Field, 0, ""},
+		{"PtraceRegs.Es", Field, 0, ""},
+		{"PtraceRegs.Esi", Field, 0, ""},
+		{"PtraceRegs.Esp", Field, 0, ""},
+		{"PtraceRegs.Fs", Field, 0, ""},
+		{"PtraceRegs.Fs_base", Field, 0, ""},
+		{"PtraceRegs.Gs", Field, 0, ""},
+		{"PtraceRegs.Gs_base", Field, 0, ""},
+		{"PtraceRegs.Orig_eax", Field, 0, ""},
+		{"PtraceRegs.Orig_rax", Field, 0, ""},
+		{"PtraceRegs.R10", Field, 0, ""},
+		{"PtraceRegs.R11", Field, 0, ""},
+		{"PtraceRegs.R12", Field, 0, ""},
+		{"PtraceRegs.R13", Field, 0, ""},
+		{"PtraceRegs.R14", Field, 0, ""},
+		{"PtraceRegs.R15", Field, 0, ""},
+		{"PtraceRegs.R8", Field, 0, ""},
+		{"PtraceRegs.R9", Field, 0, ""},
+		{"PtraceRegs.Rax", Field, 0, ""},
+		{"PtraceRegs.Rbp", Field, 0, ""},
+		{"PtraceRegs.Rbx", Field, 0, ""},
+		{"PtraceRegs.Rcx", Field, 0, ""},
+		{"PtraceRegs.Rdi", Field, 0, ""},
+		{"PtraceRegs.Rdx", Field, 0, ""},
+		{"PtraceRegs.Rip", Field, 0, ""},
+		{"PtraceRegs.Rsi", Field, 0, ""},
+		{"PtraceRegs.Rsp", Field, 0, ""},
+		{"PtraceRegs.Ss", Field, 0, ""},
+		{"PtraceRegs.Uregs", Field, 0, ""},
+		{"PtraceRegs.Xcs", Field, 0, ""},
+		{"PtraceRegs.Xds", Field, 0, ""},
+		{"PtraceRegs.Xes", Field, 0, ""},
+		{"PtraceRegs.Xfs", Field, 0, ""},
+		{"PtraceRegs.Xgs", Field, 0, ""},
+		{"PtraceRegs.Xss", Field, 0, ""},
+		{"PtraceSetOptions", Func, 0, "func(pid int, options int) (err error)"},
+		{"PtraceSetRegs", Func, 0, "func(pid int, regs *PtraceRegs) (err error)"},
+		{"PtraceSingleStep", Func, 0, "func(pid int) (err error)"},
+		{"PtraceSyscall", Func, 1, "func(pid int, signal int) (err error)"},
+		{"Pwrite", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
+		{"REG_BINARY", Const, 0, ""},
+		{"REG_DWORD", Const, 0, ""},
+		{"REG_DWORD_BIG_ENDIAN", Const, 0, ""},
+		{"REG_DWORD_LITTLE_ENDIAN", Const, 0, ""},
+		{"REG_EXPAND_SZ", Const, 0, ""},
+		{"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0, ""},
+		{"REG_LINK", Const, 0, ""},
+		{"REG_MULTI_SZ", Const, 0, ""},
+		{"REG_NONE", Const, 0, ""},
+		{"REG_QWORD", Const, 0, ""},
+		{"REG_QWORD_LITTLE_ENDIAN", Const, 0, ""},
+		{"REG_RESOURCE_LIST", Const, 0, ""},
+		{"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0, ""},
+		{"REG_SZ", Const, 0, ""},
+		{"RLIMIT_AS", Const, 0, ""},
+		{"RLIMIT_CORE", Const, 0, ""},
+		{"RLIMIT_CPU", Const, 0, ""},
+		{"RLIMIT_CPU_USAGE_MONITOR", Const, 16, ""},
+		{"RLIMIT_DATA", Const, 0, ""},
+		{"RLIMIT_FSIZE", Const, 0, ""},
+		{"RLIMIT_NOFILE", Const, 0, ""},
+		{"RLIMIT_STACK", Const, 0, ""},
+		{"RLIM_INFINITY", Const, 0, ""},
+		{"RTAX_ADVMSS", Const, 0, ""},
+		{"RTAX_AUTHOR", Const, 0, ""},
+		{"RTAX_BRD", Const, 0, ""},
+		{"RTAX_CWND", Const, 0, ""},
+		{"RTAX_DST", Const, 0, ""},
+		{"RTAX_FEATURES", Const, 0, ""},
+		{"RTAX_FEATURE_ALLFRAG", Const, 0, ""},
+		{"RTAX_FEATURE_ECN", Const, 0, ""},
+		{"RTAX_FEATURE_SACK", Const, 0, ""},
+		{"RTAX_FEATURE_TIMESTAMP", Const, 0, ""},
+		{"RTAX_GATEWAY", Const, 0, ""},
+		{"RTAX_GENMASK", Const, 0, ""},
+		{"RTAX_HOPLIMIT", Const, 0, ""},
+		{"RTAX_IFA", Const, 0, ""},
+		{"RTAX_IFP", Const, 0, ""},
+		{"RTAX_INITCWND", Const, 0, ""},
+		{"RTAX_INITRWND", Const, 0, ""},
+		{"RTAX_LABEL", Const, 1, ""},
+		{"RTAX_LOCK", Const, 0, ""},
+		{"RTAX_MAX", Const, 0, ""},
+		{"RTAX_MTU", Const, 0, ""},
+		{"RTAX_NETMASK", Const, 0, ""},
+		{"RTAX_REORDERING", Const, 0, ""},
+		{"RTAX_RTO_MIN", Const, 0, ""},
+		{"RTAX_RTT", Const, 0, ""},
+		{"RTAX_RTTVAR", Const, 0, ""},
+		{"RTAX_SRC", Const, 1, ""},
+		{"RTAX_SRCMASK", Const, 1, ""},
+		{"RTAX_SSTHRESH", Const, 0, ""},
+		{"RTAX_TAG", Const, 1, ""},
+		{"RTAX_UNSPEC", Const, 0, ""},
+		{"RTAX_WINDOW", Const, 0, ""},
+		{"RTA_ALIGNTO", Const, 0, ""},
+		{"RTA_AUTHOR", Const, 0, ""},
+		{"RTA_BRD", Const, 0, ""},
+		{"RTA_CACHEINFO", Const, 0, ""},
+		{"RTA_DST", Const, 0, ""},
+		{"RTA_FLOW", Const, 0, ""},
+		{"RTA_GATEWAY", Const, 0, ""},
+		{"RTA_GENMASK", Const, 0, ""},
+		{"RTA_IFA", Const, 0, ""},
+		{"RTA_IFP", Const, 0, ""},
+		{"RTA_IIF", Const, 0, ""},
+		{"RTA_LABEL", Const, 1, ""},
+		{"RTA_MAX", Const, 0, ""},
+		{"RTA_METRICS", Const, 0, ""},
+		{"RTA_MULTIPATH", Const, 0, ""},
+		{"RTA_NETMASK", Const, 0, ""},
+		{"RTA_OIF", Const, 0, ""},
+		{"RTA_PREFSRC", Const, 0, ""},
+		{"RTA_PRIORITY", Const, 0, ""},
+		{"RTA_SRC", Const, 0, ""},
+		{"RTA_SRCMASK", Const, 1, ""},
+		{"RTA_TABLE", Const, 0, ""},
+		{"RTA_TAG", Const, 1, ""},
+		{"RTA_UNSPEC", Const, 0, ""},
+		{"RTCF_DIRECTSRC", Const, 0, ""},
+		{"RTCF_DOREDIRECT", Const, 0, ""},
+		{"RTCF_LOG", Const, 0, ""},
+		{"RTCF_MASQ", Const, 0, ""},
+		{"RTCF_NAT", Const, 0, ""},
+		{"RTCF_VALVE", Const, 0, ""},
+		{"RTF_ADDRCLASSMASK", Const, 0, ""},
+		{"RTF_ADDRCONF", Const, 0, ""},
+		{"RTF_ALLONLINK", Const, 0, ""},
+		{"RTF_ANNOUNCE", Const, 1, ""},
+		{"RTF_BLACKHOLE", Const, 0, ""},
+		{"RTF_BROADCAST", Const, 0, ""},
+		{"RTF_CACHE", Const, 0, ""},
+		{"RTF_CLONED", Const, 1, ""},
+		{"RTF_CLONING", Const, 0, ""},
+		{"RTF_CONDEMNED", Const, 0, ""},
+		{"RTF_DEFAULT", Const, 0, ""},
+		{"RTF_DELCLONE", Const, 0, ""},
+		{"RTF_DONE", Const, 0, ""},
+		{"RTF_DYNAMIC", Const, 0, ""},
+		{"RTF_FLOW", Const, 0, ""},
+		{"RTF_FMASK", Const, 0, ""},
+		{"RTF_GATEWAY", Const, 0, ""},
+		{"RTF_GWFLAG_COMPAT", Const, 3, ""},
+		{"RTF_HOST", Const, 0, ""},
+		{"RTF_IFREF", Const, 0, ""},
+		{"RTF_IFSCOPE", Const, 0, ""},
+		{"RTF_INTERFACE", Const, 0, ""},
+		{"RTF_IRTT", Const, 0, ""},
+		{"RTF_LINKRT", Const, 0, ""},
+		{"RTF_LLDATA", Const, 0, ""},
+		{"RTF_LLINFO", Const, 0, ""},
+		{"RTF_LOCAL", Const, 0, ""},
+		{"RTF_MASK", Const, 1, ""},
+		{"RTF_MODIFIED", Const, 0, ""},
+		{"RTF_MPATH", Const, 1, ""},
+		{"RTF_MPLS", Const, 1, ""},
+		{"RTF_MSS", Const, 0, ""},
+		{"RTF_MTU", Const, 0, ""},
+		{"RTF_MULTICAST", Const, 0, ""},
+		{"RTF_NAT", Const, 0, ""},
+		{"RTF_NOFORWARD", Const, 0, ""},
+		{"RTF_NONEXTHOP", Const, 0, ""},
+		{"RTF_NOPMTUDISC", Const, 0, ""},
+		{"RTF_PERMANENT_ARP", Const, 1, ""},
+		{"RTF_PINNED", Const, 0, ""},
+		{"RTF_POLICY", Const, 0, ""},
+		{"RTF_PRCLONING", Const, 0, ""},
+		{"RTF_PROTO1", Const, 0, ""},
+		{"RTF_PROTO2", Const, 0, ""},
+		{"RTF_PROTO3", Const, 0, ""},
+		{"RTF_PROXY", Const, 16, ""},
+		{"RTF_REINSTATE", Const, 0, ""},
+		{"RTF_REJECT", Const, 0, ""},
+		{"RTF_RNH_LOCKED", Const, 0, ""},
+		{"RTF_ROUTER", Const, 16, ""},
+		{"RTF_SOURCE", Const, 1, ""},
+		{"RTF_SRC", Const, 1, ""},
+		{"RTF_STATIC", Const, 0, ""},
+		{"RTF_STICKY", Const, 0, ""},
+		{"RTF_THROW", Const, 0, ""},
+		{"RTF_TUNNEL", Const, 1, ""},
+		{"RTF_UP", Const, 0, ""},
+		{"RTF_USETRAILERS", Const, 1, ""},
+		{"RTF_WASCLONED", Const, 0, ""},
+		{"RTF_WINDOW", Const, 0, ""},
+		{"RTF_XRESOLVE", Const, 0, ""},
+		{"RTM_ADD", Const, 0, ""},
+		{"RTM_BASE", Const, 0, ""},
+		{"RTM_CHANGE", Const, 0, ""},
+		{"RTM_CHGADDR", Const, 1, ""},
+		{"RTM_DELACTION", Const, 0, ""},
+		{"RTM_DELADDR", Const, 0, ""},
+		{"RTM_DELADDRLABEL", Const, 0, ""},
+		{"RTM_DELETE", Const, 0, ""},
+		{"RTM_DELLINK", Const, 0, ""},
+		{"RTM_DELMADDR", Const, 0, ""},
+		{"RTM_DELNEIGH", Const, 0, ""},
+		{"RTM_DELQDISC", Const, 0, ""},
+		{"RTM_DELROUTE", Const, 0, ""},
+		{"RTM_DELRULE", Const, 0, ""},
+		{"RTM_DELTCLASS", Const, 0, ""},
+		{"RTM_DELTFILTER", Const, 0, ""},
+		{"RTM_DESYNC", Const, 1, ""},
+		{"RTM_F_CLONED", Const, 0, ""},
+		{"RTM_F_EQUALIZE", Const, 0, ""},
+		{"RTM_F_NOTIFY", Const, 0, ""},
+		{"RTM_F_PREFIX", Const, 0, ""},
+		{"RTM_GET", Const, 0, ""},
+		{"RTM_GET2", Const, 0, ""},
+		{"RTM_GETACTION", Const, 0, ""},
+		{"RTM_GETADDR", Const, 0, ""},
+		{"RTM_GETADDRLABEL", Const, 0, ""},
+		{"RTM_GETANYCAST", Const, 0, ""},
+		{"RTM_GETDCB", Const, 0, ""},
+		{"RTM_GETLINK", Const, 0, ""},
+		{"RTM_GETMULTICAST", Const, 0, ""},
+		{"RTM_GETNEIGH", Const, 0, ""},
+		{"RTM_GETNEIGHTBL", Const, 0, ""},
+		{"RTM_GETQDISC", Const, 0, ""},
+		{"RTM_GETROUTE", Const, 0, ""},
+		{"RTM_GETRULE", Const, 0, ""},
+		{"RTM_GETTCLASS", Const, 0, ""},
+		{"RTM_GETTFILTER", Const, 0, ""},
+		{"RTM_IEEE80211", Const, 0, ""},
+		{"RTM_IFANNOUNCE", Const, 0, ""},
+		{"RTM_IFINFO", Const, 0, ""},
+		{"RTM_IFINFO2", Const, 0, ""},
+		{"RTM_LLINFO_UPD", Const, 1, ""},
+		{"RTM_LOCK", Const, 0, ""},
+		{"RTM_LOSING", Const, 0, ""},
+		{"RTM_MAX", Const, 0, ""},
+		{"RTM_MAXSIZE", Const, 1, ""},
+		{"RTM_MISS", Const, 0, ""},
+		{"RTM_NEWACTION", Const, 0, ""},
+		{"RTM_NEWADDR", Const, 0, ""},
+		{"RTM_NEWADDRLABEL", Const, 0, ""},
+		{"RTM_NEWLINK", Const, 0, ""},
+		{"RTM_NEWMADDR", Const, 0, ""},
+		{"RTM_NEWMADDR2", Const, 0, ""},
+		{"RTM_NEWNDUSEROPT", Const, 0, ""},
+		{"RTM_NEWNEIGH", Const, 0, ""},
+		{"RTM_NEWNEIGHTBL", Const, 0, ""},
+		{"RTM_NEWPREFIX", Const, 0, ""},
+		{"RTM_NEWQDISC", Const, 0, ""},
+		{"RTM_NEWROUTE", Const, 0, ""},
+		{"RTM_NEWRULE", Const, 0, ""},
+		{"RTM_NEWTCLASS", Const, 0, ""},
+		{"RTM_NEWTFILTER", Const, 0, ""},
+		{"RTM_NR_FAMILIES", Const, 0, ""},
+		{"RTM_NR_MSGTYPES", Const, 0, ""},
+		{"RTM_OIFINFO", Const, 1, ""},
+		{"RTM_OLDADD", Const, 0, ""},
+		{"RTM_OLDDEL", Const, 0, ""},
+		{"RTM_OOIFINFO", Const, 1, ""},
+		{"RTM_REDIRECT", Const, 0, ""},
+		{"RTM_RESOLVE", Const, 0, ""},
+		{"RTM_RTTUNIT", Const, 0, ""},
+		{"RTM_SETDCB", Const, 0, ""},
+		{"RTM_SETGATE", Const, 1, ""},
+		{"RTM_SETLINK", Const, 0, ""},
+		{"RTM_SETNEIGHTBL", Const, 0, ""},
+		{"RTM_VERSION", Const, 0, ""},
+		{"RTNH_ALIGNTO", Const, 0, ""},
+		{"RTNH_F_DEAD", Const, 0, ""},
+		{"RTNH_F_ONLINK", Const, 0, ""},
+		{"RTNH_F_PERVASIVE", Const, 0, ""},
+		{"RTNLGRP_IPV4_IFADDR", Const, 1, ""},
+		{"RTNLGRP_IPV4_MROUTE", Const, 1, ""},
+		{"RTNLGRP_IPV4_ROUTE", Const, 1, ""},
+		{"RTNLGRP_IPV4_RULE", Const, 1, ""},
+		{"RTNLGRP_IPV6_IFADDR", Const, 1, ""},
+		{"RTNLGRP_IPV6_IFINFO", Const, 1, ""},
+		{"RTNLGRP_IPV6_MROUTE", Const, 1, ""},
+		{"RTNLGRP_IPV6_PREFIX", Const, 1, ""},
+		{"RTNLGRP_IPV6_ROUTE", Const, 1, ""},
+		{"RTNLGRP_IPV6_RULE", Const, 1, ""},
+		{"RTNLGRP_LINK", Const, 1, ""},
+		{"RTNLGRP_ND_USEROPT", Const, 1, ""},
+		{"RTNLGRP_NEIGH", Const, 1, ""},
+		{"RTNLGRP_NONE", Const, 1, ""},
+		{"RTNLGRP_NOTIFY", Const, 1, ""},
+		{"RTNLGRP_TC", Const, 1, ""},
+		{"RTN_ANYCAST", Const, 0, ""},
+		{"RTN_BLACKHOLE", Const, 0, ""},
+		{"RTN_BROADCAST", Const, 0, ""},
+		{"RTN_LOCAL", Const, 0, ""},
+		{"RTN_MAX", Const, 0, ""},
+		{"RTN_MULTICAST", Const, 0, ""},
+		{"RTN_NAT", Const, 0, ""},
+		{"RTN_PROHIBIT", Const, 0, ""},
+		{"RTN_THROW", Const, 0, ""},
+		{"RTN_UNICAST", Const, 0, ""},
+		{"RTN_UNREACHABLE", Const, 0, ""},
+		{"RTN_UNSPEC", Const, 0, ""},
+		{"RTN_XRESOLVE", Const, 0, ""},
+		{"RTPROT_BIRD", Const, 0, ""},
+		{"RTPROT_BOOT", Const, 0, ""},
+		{"RTPROT_DHCP", Const, 0, ""},
+		{"RTPROT_DNROUTED", Const, 0, ""},
+		{"RTPROT_GATED", Const, 0, ""},
+		{"RTPROT_KERNEL", Const, 0, ""},
+		{"RTPROT_MRT", Const, 0, ""},
+		{"RTPROT_NTK", Const, 0, ""},
+		{"RTPROT_RA", Const, 0, ""},
+		{"RTPROT_REDIRECT", Const, 0, ""},
+		{"RTPROT_STATIC", Const, 0, ""},
+		{"RTPROT_UNSPEC", Const, 0, ""},
+		{"RTPROT_XORP", Const, 0, ""},
+		{"RTPROT_ZEBRA", Const, 0, ""},
+		{"RTV_EXPIRE", Const, 0, ""},
+		{"RTV_HOPCOUNT", Const, 0, ""},
+		{"RTV_MTU", Const, 0, ""},
+		{"RTV_RPIPE", Const, 0, ""},
+		{"RTV_RTT", Const, 0, ""},
+		{"RTV_RTTVAR", Const, 0, ""},
+		{"RTV_SPIPE", Const, 0, ""},
+		{"RTV_SSTHRESH", Const, 0, ""},
+		{"RTV_WEIGHT", Const, 0, ""},
+		{"RT_CACHING_CONTEXT", Const, 1, ""},
+		{"RT_CLASS_DEFAULT", Const, 0, ""},
+		{"RT_CLASS_LOCAL", Const, 0, ""},
+		{"RT_CLASS_MAIN", Const, 0, ""},
+		{"RT_CLASS_MAX", Const, 0, ""},
+		{"RT_CLASS_UNSPEC", Const, 0, ""},
+		{"RT_DEFAULT_FIB", Const, 1, ""},
+		{"RT_NORTREF", Const, 1, ""},
+		{"RT_SCOPE_HOST", Const, 0, ""},
+		{"RT_SCOPE_LINK", Const, 0, ""},
+		{"RT_SCOPE_NOWHERE", Const, 0, ""},
+		{"RT_SCOPE_SITE", Const, 0, ""},
+		{"RT_SCOPE_UNIVERSE", Const, 0, ""},
+		{"RT_TABLEID_MAX", Const, 1, ""},
+		{"RT_TABLE_COMPAT", Const, 0, ""},
+		{"RT_TABLE_DEFAULT", Const, 0, ""},
+		{"RT_TABLE_LOCAL", Const, 0, ""},
+		{"RT_TABLE_MAIN", Const, 0, ""},
+		{"RT_TABLE_MAX", Const, 0, ""},
+		{"RT_TABLE_UNSPEC", Const, 0, ""},
+		{"RUSAGE_CHILDREN", Const, 0, ""},
+		{"RUSAGE_SELF", Const, 0, ""},
+		{"RUSAGE_THREAD", Const, 0, ""},
+		{"Radvisory_t", Type, 0, ""},
+		{"Radvisory_t.Count", Field, 0, ""},
+		{"Radvisory_t.Offset", Field, 0, ""},
+		{"Radvisory_t.Pad_cgo_0", Field, 0, ""},
+		{"RawConn", Type, 9, ""},
+		{"RawSockaddr", Type, 0, ""},
+		{"RawSockaddr.Data", Field, 0, ""},
+		{"RawSockaddr.Family", Field, 0, ""},
+		{"RawSockaddr.Len", Field, 0, ""},
+		{"RawSockaddrAny", Type, 0, ""},
+		{"RawSockaddrAny.Addr", Field, 0, ""},
+		{"RawSockaddrAny.Pad", Field, 0, ""},
+		{"RawSockaddrDatalink", Type, 0, ""},
+		{"RawSockaddrDatalink.Alen", Field, 0, ""},
+		{"RawSockaddrDatalink.Data", Field, 0, ""},
+		{"RawSockaddrDatalink.Family", Field, 0, ""},
+		{"RawSockaddrDatalink.Index", Field, 0, ""},
+		{"RawSockaddrDatalink.Len", Field, 0, ""},
+		{"RawSockaddrDatalink.Nlen", Field, 0, ""},
+		{"RawSockaddrDatalink.Pad_cgo_0", Field, 2, ""},
+		{"RawSockaddrDatalink.Slen", Field, 0, ""},
+		{"RawSockaddrDatalink.Type", Field, 0, ""},
+		{"RawSockaddrInet4", Type, 0, ""},
+		{"RawSockaddrInet4.Addr", Field, 0, ""},
+		{"RawSockaddrInet4.Family", Field, 0, ""},
+		{"RawSockaddrInet4.Len", Field, 0, ""},
+		{"RawSockaddrInet4.Port", Field, 0, ""},
+		{"RawSockaddrInet4.Zero", Field, 0, ""},
+		{"RawSockaddrInet6", Type, 0, ""},
+		{"RawSockaddrInet6.Addr", Field, 0, ""},
+		{"RawSockaddrInet6.Family", Field, 0, ""},
+		{"RawSockaddrInet6.Flowinfo", Field, 0, ""},
+		{"RawSockaddrInet6.Len", Field, 0, ""},
+		{"RawSockaddrInet6.Port", Field, 0, ""},
+		{"RawSockaddrInet6.Scope_id", Field, 0, ""},
+		{"RawSockaddrLinklayer", Type, 0, ""},
+		{"RawSockaddrLinklayer.Addr", Field, 0, ""},
+		{"RawSockaddrLinklayer.Family", Field, 0, ""},
+		{"RawSockaddrLinklayer.Halen", Field, 0, ""},
+		{"RawSockaddrLinklayer.Hatype", Field, 0, ""},
+		{"RawSockaddrLinklayer.Ifindex", Field, 0, ""},
+		{"RawSockaddrLinklayer.Pkttype", Field, 0, ""},
+		{"RawSockaddrLinklayer.Protocol", Field, 0, ""},
+		{"RawSockaddrNetlink", Type, 0, ""},
+		{"RawSockaddrNetlink.Family", Field, 0, ""},
+		{"RawSockaddrNetlink.Groups", Field, 0, ""},
+		{"RawSockaddrNetlink.Pad", Field, 0, ""},
+		{"RawSockaddrNetlink.Pid", Field, 0, ""},
+		{"RawSockaddrUnix", Type, 0, ""},
+		{"RawSockaddrUnix.Family", Field, 0, ""},
+		{"RawSockaddrUnix.Len", Field, 0, ""},
+		{"RawSockaddrUnix.Pad_cgo_0", Field, 2, ""},
+		{"RawSockaddrUnix.Path", Field, 0, ""},
+		{"RawSyscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+		{"RawSyscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+		{"Read", Func, 0, "func(fd int, p []byte) (n int, err error)"},
+		{"ReadConsole", Func, 1, ""},
+		{"ReadDirectoryChanges", Func, 0, ""},
+		{"ReadDirent", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
+		{"ReadFile", Func, 0, ""},
+		{"Readlink", Func, 0, "func(path string, buf []byte) (n int, err error)"},
+		{"Reboot", Func, 0, "func(cmd int) (err error)"},
+		{"Recvfrom", Func, 0, "func(fd int, p []byte, flags int) (n int, from Sockaddr, err error)"},
+		{"Recvmsg", Func, 0, "func(fd int, p []byte, oob []byte, flags int) (n int, oobn int, recvflags int, from Sockaddr, err error)"},
+		{"RegCloseKey", Func, 0, ""},
+		{"RegEnumKeyEx", Func, 0, ""},
+		{"RegOpenKeyEx", Func, 0, ""},
+		{"RegQueryInfoKey", Func, 0, ""},
+		{"RegQueryValueEx", Func, 0, ""},
+		{"RemoveDirectory", Func, 0, ""},
+		{"Removexattr", Func, 1, "func(path string, attr string) (err error)"},
+		{"Rename", Func, 0, "func(oldpath string, newpath string) (err error)"},
+		{"Renameat", Func, 0, "func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)"},
+		{"Revoke", Func, 0, ""},
+		{"Rlimit", Type, 0, ""},
+		{"Rlimit.Cur", Field, 0, ""},
+		{"Rlimit.Max", Field, 0, ""},
+		{"Rmdir", Func, 0, "func(path string) error"},
+		{"RouteMessage", Type, 0, ""},
+		{"RouteMessage.Data", Field, 0, ""},
+		{"RouteMessage.Header", Field, 0, ""},
+		{"RouteRIB", Func, 0, ""},
+		{"RoutingMessage", Type, 0, ""},
+		{"RtAttr", Type, 0, ""},
+		{"RtAttr.Len", Field, 0, ""},
+		{"RtAttr.Type", Field, 0, ""},
+		{"RtGenmsg", Type, 0, ""},
+		{"RtGenmsg.Family", Field, 0, ""},
+		{"RtMetrics", Type, 0, ""},
+		{"RtMetrics.Expire", Field, 0, ""},
+		{"RtMetrics.Filler", Field, 0, ""},
+		{"RtMetrics.Hopcount", Field, 0, ""},
+		{"RtMetrics.Locks", Field, 0, ""},
+		{"RtMetrics.Mtu", Field, 0, ""},
+		{"RtMetrics.Pad", Field, 3, ""},
+		{"RtMetrics.Pksent", Field, 0, ""},
+		{"RtMetrics.Recvpipe", Field, 0, ""},
+		{"RtMetrics.Refcnt", Field, 2, ""},
+		{"RtMetrics.Rtt", Field, 0, ""},
+		{"RtMetrics.Rttvar", Field, 0, ""},
+		{"RtMetrics.Sendpipe", Field, 0, ""},
+		{"RtMetrics.Ssthresh", Field, 0, ""},
+		{"RtMetrics.Weight", Field, 0, ""},
+		{"RtMsg", Type, 0, ""},
+		{"RtMsg.Dst_len", Field, 0, ""},
+		{"RtMsg.Family", Field, 0, ""},
+		{"RtMsg.Flags", Field, 0, ""},
+		{"RtMsg.Protocol", Field, 0, ""},
+		{"RtMsg.Scope", Field, 0, ""},
+		{"RtMsg.Src_len", Field, 0, ""},
+		{"RtMsg.Table", Field, 0, ""},
+		{"RtMsg.Tos", Field, 0, ""},
+		{"RtMsg.Type", Field, 0, ""},
+		{"RtMsghdr", Type, 0, ""},
+		{"RtMsghdr.Addrs", Field, 0, ""},
+		{"RtMsghdr.Errno", Field, 0, ""},
+		{"RtMsghdr.Flags", Field, 0, ""},
+		{"RtMsghdr.Fmask", Field, 0, ""},
+		{"RtMsghdr.Hdrlen", Field, 2, ""},
+		{"RtMsghdr.Index", Field, 0, ""},
+		{"RtMsghdr.Inits", Field, 0, ""},
+		{"RtMsghdr.Mpls", Field, 2, ""},
+		{"RtMsghdr.Msglen", Field, 0, ""},
+		{"RtMsghdr.Pad_cgo_0", Field, 0, ""},
+		{"RtMsghdr.Pad_cgo_1", Field, 2, ""},
+		{"RtMsghdr.Pid", Field, 0, ""},
+		{"RtMsghdr.Priority", Field, 2, ""},
+		{"RtMsghdr.Rmx", Field, 0, ""},
+		{"RtMsghdr.Seq", Field, 0, ""},
+		{"RtMsghdr.Tableid", Field, 2, ""},
+		{"RtMsghdr.Type", Field, 0, ""},
+		{"RtMsghdr.Use", Field, 0, ""},
+		{"RtMsghdr.Version", Field, 0, ""},
+		{"RtNexthop", Type, 0, ""},
+		{"RtNexthop.Flags", Field, 0, ""},
+		{"RtNexthop.Hops", Field, 0, ""},
+		{"RtNexthop.Ifindex", Field, 0, ""},
+		{"RtNexthop.Len", Field, 0, ""},
+		{"Rusage", Type, 0, ""},
+		{"Rusage.CreationTime", Field, 0, ""},
+		{"Rusage.ExitTime", Field, 0, ""},
+		{"Rusage.Idrss", Field, 0, ""},
+		{"Rusage.Inblock", Field, 0, ""},
+		{"Rusage.Isrss", Field, 0, ""},
+		{"Rusage.Ixrss", Field, 0, ""},
+		{"Rusage.KernelTime", Field, 0, ""},
+		{"Rusage.Majflt", Field, 0, ""},
+		{"Rusage.Maxrss", Field, 0, ""},
+		{"Rusage.Minflt", Field, 0, ""},
+		{"Rusage.Msgrcv", Field, 0, ""},
+		{"Rusage.Msgsnd", Field, 0, ""},
+		{"Rusage.Nivcsw", Field, 0, ""},
+		{"Rusage.Nsignals", Field, 0, ""},
+		{"Rusage.Nswap", Field, 0, ""},
+		{"Rusage.Nvcsw", Field, 0, ""},
+		{"Rusage.Oublock", Field, 0, ""},
+		{"Rusage.Stime", Field, 0, ""},
+		{"Rusage.UserTime", Field, 0, ""},
+		{"Rusage.Utime", Field, 0, ""},
+		{"SCM_BINTIME", Const, 0, ""},
+		{"SCM_CREDENTIALS", Const, 0, ""},
+		{"SCM_CREDS", Const, 0, ""},
+		{"SCM_RIGHTS", Const, 0, ""},
+		{"SCM_TIMESTAMP", Const, 0, ""},
+		{"SCM_TIMESTAMPING", Const, 0, ""},
+		{"SCM_TIMESTAMPNS", Const, 0, ""},
+		{"SCM_TIMESTAMP_MONOTONIC", Const, 0, ""},
+		{"SHUT_RD", Const, 0, ""},
+		{"SHUT_RDWR", Const, 0, ""},
+		{"SHUT_WR", Const, 0, ""},
+		{"SID", Type, 0, ""},
+		{"SIDAndAttributes", Type, 0, ""},
+		{"SIDAndAttributes.Attributes", Field, 0, ""},
+		{"SIDAndAttributes.Sid", Field, 0, ""},
+		{"SIGABRT", Const, 0, ""},
+		{"SIGALRM", Const, 0, ""},
+		{"SIGBUS", Const, 0, ""},
+		{"SIGCHLD", Const, 0, ""},
+		{"SIGCLD", Const, 0, ""},
+		{"SIGCONT", Const, 0, ""},
+		{"SIGEMT", Const, 0, ""},
+		{"SIGFPE", Const, 0, ""},
+		{"SIGHUP", Const, 0, ""},
+		{"SIGILL", Const, 0, ""},
+		{"SIGINFO", Const, 0, ""},
+		{"SIGINT", Const, 0, ""},
+		{"SIGIO", Const, 0, ""},
+		{"SIGIOT", Const, 0, ""},
+		{"SIGKILL", Const, 0, ""},
+		{"SIGLIBRT", Const, 1, ""},
+		{"SIGLWP", Const, 0, ""},
+		{"SIGPIPE", Const, 0, ""},
+		{"SIGPOLL", Const, 0, ""},
+		{"SIGPROF", Const, 0, ""},
+		{"SIGPWR", Const, 0, ""},
+		{"SIGQUIT", Const, 0, ""},
+		{"SIGSEGV", Const, 0, ""},
+		{"SIGSTKFLT", Const, 0, ""},
+		{"SIGSTOP", Const, 0, ""},
+		{"SIGSYS", Const, 0, ""},
+		{"SIGTERM", Const, 0, ""},
+		{"SIGTHR", Const, 0, ""},
+		{"SIGTRAP", Const, 0, ""},
+		{"SIGTSTP", Const, 0, ""},
+		{"SIGTTIN", Const, 0, ""},
+		{"SIGTTOU", Const, 0, ""},
+		{"SIGUNUSED", Const, 0, ""},
+		{"SIGURG", Const, 0, ""},
+		{"SIGUSR1", Const, 0, ""},
+		{"SIGUSR2", Const, 0, ""},
+		{"SIGVTALRM", Const, 0, ""},
+		{"SIGWINCH", Const, 0, ""},
+		{"SIGXCPU", Const, 0, ""},
+		{"SIGXFSZ", Const, 0, ""},
+		{"SIOCADDDLCI", Const, 0, ""},
+		{"SIOCADDMULTI", Const, 0, ""},
+		{"SIOCADDRT", Const, 0, ""},
+		{"SIOCAIFADDR", Const, 0, ""},
+		{"SIOCAIFGROUP", Const, 0, ""},
+		{"SIOCALIFADDR", Const, 0, ""},
+		{"SIOCARPIPLL", Const, 0, ""},
+		{"SIOCATMARK", Const, 0, ""},
+		{"SIOCAUTOADDR", Const, 0, ""},
+		{"SIOCAUTONETMASK", Const, 0, ""},
+		{"SIOCBRDGADD", Const, 1, ""},
+		{"SIOCBRDGADDS", Const, 1, ""},
+		{"SIOCBRDGARL", Const, 1, ""},
+		{"SIOCBRDGDADDR", Const, 1, ""},
+		{"SIOCBRDGDEL", Const, 1, ""},
+		{"SIOCBRDGDELS", Const, 1, ""},
+		{"SIOCBRDGFLUSH", Const, 1, ""},
+		{"SIOCBRDGFRL", Const, 1, ""},
+		{"SIOCBRDGGCACHE", Const, 1, ""},
+		{"SIOCBRDGGFD", Const, 1, ""},
+		{"SIOCBRDGGHT", Const, 1, ""},
+		{"SIOCBRDGGIFFLGS", Const, 1, ""},
+		{"SIOCBRDGGMA", Const, 1, ""},
+		{"SIOCBRDGGPARAM", Const, 1, ""},
+		{"SIOCBRDGGPRI", Const, 1, ""},
+		{"SIOCBRDGGRL", Const, 1, ""},
+		{"SIOCBRDGGSIFS", Const, 1, ""},
+		{"SIOCBRDGGTO", Const, 1, ""},
+		{"SIOCBRDGIFS", Const, 1, ""},
+		{"SIOCBRDGRTS", Const, 1, ""},
+		{"SIOCBRDGSADDR", Const, 1, ""},
+		{"SIOCBRDGSCACHE", Const, 1, ""},
+		{"SIOCBRDGSFD", Const, 1, ""},
+		{"SIOCBRDGSHT", Const, 1, ""},
+		{"SIOCBRDGSIFCOST", Const, 1, ""},
+		{"SIOCBRDGSIFFLGS", Const, 1, ""},
+		{"SIOCBRDGSIFPRIO", Const, 1, ""},
+		{"SIOCBRDGSMA", Const, 1, ""},
+		{"SIOCBRDGSPRI", Const, 1, ""},
+		{"SIOCBRDGSPROTO", Const, 1, ""},
+		{"SIOCBRDGSTO", Const, 1, ""},
+		{"SIOCBRDGSTXHC", Const, 1, ""},
+		{"SIOCDARP", Const, 0, ""},
+		{"SIOCDELDLCI", Const, 0, ""},
+		{"SIOCDELMULTI", Const, 0, ""},
+		{"SIOCDELRT", Const, 0, ""},
+		{"SIOCDEVPRIVATE", Const, 0, ""},
+		{"SIOCDIFADDR", Const, 0, ""},
+		{"SIOCDIFGROUP", Const, 0, ""},
+		{"SIOCDIFPHYADDR", Const, 0, ""},
+		{"SIOCDLIFADDR", Const, 0, ""},
+		{"SIOCDRARP", Const, 0, ""},
+		{"SIOCGARP", Const, 0, ""},
+		{"SIOCGDRVSPEC", Const, 0, ""},
+		{"SIOCGETKALIVE", Const, 1, ""},
+		{"SIOCGETLABEL", Const, 1, ""},
+		{"SIOCGETPFLOW", Const, 1, ""},
+		{"SIOCGETPFSYNC", Const, 1, ""},
+		{"SIOCGETSGCNT", Const, 0, ""},
+		{"SIOCGETVIFCNT", Const, 0, ""},
+		{"SIOCGETVLAN", Const, 0, ""},
+		{"SIOCGHIWAT", Const, 0, ""},
+		{"SIOCGIFADDR", Const, 0, ""},
+		{"SIOCGIFADDRPREF", Const, 1, ""},
+		{"SIOCGIFALIAS", Const, 1, ""},
+		{"SIOCGIFALTMTU", Const, 0, ""},
+		{"SIOCGIFASYNCMAP", Const, 0, ""},
+		{"SIOCGIFBOND", Const, 0, ""},
+		{"SIOCGIFBR", Const, 0, ""},
+		{"SIOCGIFBRDADDR", Const, 0, ""},
+		{"SIOCGIFCAP", Const, 0, ""},
+		{"SIOCGIFCONF", Const, 0, ""},
+		{"SIOCGIFCOUNT", Const, 0, ""},
+		{"SIOCGIFDATA", Const, 1, ""},
+		{"SIOCGIFDESCR", Const, 0, ""},
+		{"SIOCGIFDEVMTU", Const, 0, ""},
+		{"SIOCGIFDLT", Const, 1, ""},
+		{"SIOCGIFDSTADDR", Const, 0, ""},
+		{"SIOCGIFENCAP", Const, 0, ""},
+		{"SIOCGIFFIB", Const, 1, ""},
+		{"SIOCGIFFLAGS", Const, 0, ""},
+		{"SIOCGIFGATTR", Const, 1, ""},
+		{"SIOCGIFGENERIC", Const, 0, ""},
+		{"SIOCGIFGMEMB", Const, 0, ""},
+		{"SIOCGIFGROUP", Const, 0, ""},
+		{"SIOCGIFHARDMTU", Const, 3, ""},
+		{"SIOCGIFHWADDR", Const, 0, ""},
+		{"SIOCGIFINDEX", Const, 0, ""},
+		{"SIOCGIFKPI", Const, 0, ""},
+		{"SIOCGIFMAC", Const, 0, ""},
+		{"SIOCGIFMAP", Const, 0, ""},
+		{"SIOCGIFMEDIA", Const, 0, ""},
+		{"SIOCGIFMEM", Const, 0, ""},
+		{"SIOCGIFMETRIC", Const, 0, ""},
+		{"SIOCGIFMTU", Const, 0, ""},
+		{"SIOCGIFNAME", Const, 0, ""},
+		{"SIOCGIFNETMASK", Const, 0, ""},
+		{"SIOCGIFPDSTADDR", Const, 0, ""},
+		{"SIOCGIFPFLAGS", Const, 0, ""},
+		{"SIOCGIFPHYS", Const, 0, ""},
+		{"SIOCGIFPRIORITY", Const, 1, ""},
+		{"SIOCGIFPSRCADDR", Const, 0, ""},
+		{"SIOCGIFRDOMAIN", Const, 1, ""},
+		{"SIOCGIFRTLABEL", Const, 1, ""},
+		{"SIOCGIFSLAVE", Const, 0, ""},
+		{"SIOCGIFSTATUS", Const, 0, ""},
+		{"SIOCGIFTIMESLOT", Const, 1, ""},
+		{"SIOCGIFTXQLEN", Const, 0, ""},
+		{"SIOCGIFVLAN", Const, 0, ""},
+		{"SIOCGIFWAKEFLAGS", Const, 0, ""},
+		{"SIOCGIFXFLAGS", Const, 1, ""},
+		{"SIOCGLIFADDR", Const, 0, ""},
+		{"SIOCGLIFPHYADDR", Const, 0, ""},
+		{"SIOCGLIFPHYRTABLE", Const, 1, ""},
+		{"SIOCGLIFPHYTTL", Const, 3, ""},
+		{"SIOCGLINKSTR", Const, 1, ""},
+		{"SIOCGLOWAT", Const, 0, ""},
+		{"SIOCGPGRP", Const, 0, ""},
+		{"SIOCGPRIVATE_0", Const, 0, ""},
+		{"SIOCGPRIVATE_1", Const, 0, ""},
+		{"SIOCGRARP", Const, 0, ""},
+		{"SIOCGSPPPPARAMS", Const, 3, ""},
+		{"SIOCGSTAMP", Const, 0, ""},
+		{"SIOCGSTAMPNS", Const, 0, ""},
+		{"SIOCGVH", Const, 1, ""},
+		{"SIOCGVNETID", Const, 3, ""},
+		{"SIOCIFCREATE", Const, 0, ""},
+		{"SIOCIFCREATE2", Const, 0, ""},
+		{"SIOCIFDESTROY", Const, 0, ""},
+		{"SIOCIFGCLONERS", Const, 0, ""},
+		{"SIOCINITIFADDR", Const, 1, ""},
+		{"SIOCPROTOPRIVATE", Const, 0, ""},
+		{"SIOCRSLVMULTI", Const, 0, ""},
+		{"SIOCRTMSG", Const, 0, ""},
+		{"SIOCSARP", Const, 0, ""},
+		{"SIOCSDRVSPEC", Const, 0, ""},
+		{"SIOCSETKALIVE", Const, 1, ""},
+		{"SIOCSETLABEL", Const, 1, ""},
+		{"SIOCSETPFLOW", Const, 1, ""},
+		{"SIOCSETPFSYNC", Const, 1, ""},
+		{"SIOCSETVLAN", Const, 0, ""},
+		{"SIOCSHIWAT", Const, 0, ""},
+		{"SIOCSIFADDR", Const, 0, ""},
+		{"SIOCSIFADDRPREF", Const, 1, ""},
+		{"SIOCSIFALTMTU", Const, 0, ""},
+		{"SIOCSIFASYNCMAP", Const, 0, ""},
+		{"SIOCSIFBOND", Const, 0, ""},
+		{"SIOCSIFBR", Const, 0, ""},
+		{"SIOCSIFBRDADDR", Const, 0, ""},
+		{"SIOCSIFCAP", Const, 0, ""},
+		{"SIOCSIFDESCR", Const, 0, ""},
+		{"SIOCSIFDSTADDR", Const, 0, ""},
+		{"SIOCSIFENCAP", Const, 0, ""},
+		{"SIOCSIFFIB", Const, 1, ""},
+		{"SIOCSIFFLAGS", Const, 0, ""},
+		{"SIOCSIFGATTR", Const, 1, ""},
+		{"SIOCSIFGENERIC", Const, 0, ""},
+		{"SIOCSIFHWADDR", Const, 0, ""},
+		{"SIOCSIFHWBROADCAST", Const, 0, ""},
+		{"SIOCSIFKPI", Const, 0, ""},
+		{"SIOCSIFLINK", Const, 0, ""},
+		{"SIOCSIFLLADDR", Const, 0, ""},
+		{"SIOCSIFMAC", Const, 0, ""},
+		{"SIOCSIFMAP", Const, 0, ""},
+		{"SIOCSIFMEDIA", Const, 0, ""},
+		{"SIOCSIFMEM", Const, 0, ""},
+		{"SIOCSIFMETRIC", Const, 0, ""},
+		{"SIOCSIFMTU", Const, 0, ""},
+		{"SIOCSIFNAME", Const, 0, ""},
+		{"SIOCSIFNETMASK", Const, 0, ""},
+		{"SIOCSIFPFLAGS", Const, 0, ""},
+		{"SIOCSIFPHYADDR", Const, 0, ""},
+		{"SIOCSIFPHYS", Const, 0, ""},
+		{"SIOCSIFPRIORITY", Const, 1, ""},
+		{"SIOCSIFRDOMAIN", Const, 1, ""},
+		{"SIOCSIFRTLABEL", Const, 1, ""},
+		{"SIOCSIFRVNET", Const, 0, ""},
+		{"SIOCSIFSLAVE", Const, 0, ""},
+		{"SIOCSIFTIMESLOT", Const, 1, ""},
+		{"SIOCSIFTXQLEN", Const, 0, ""},
+		{"SIOCSIFVLAN", Const, 0, ""},
+		{"SIOCSIFVNET", Const, 0, ""},
+		{"SIOCSIFXFLAGS", Const, 1, ""},
+		{"SIOCSLIFPHYADDR", Const, 0, ""},
+		{"SIOCSLIFPHYRTABLE", Const, 1, ""},
+		{"SIOCSLIFPHYTTL", Const, 3, ""},
+		{"SIOCSLINKSTR", Const, 1, ""},
+		{"SIOCSLOWAT", Const, 0, ""},
+		{"SIOCSPGRP", Const, 0, ""},
+		{"SIOCSRARP", Const, 0, ""},
+		{"SIOCSSPPPPARAMS", Const, 3, ""},
+		{"SIOCSVH", Const, 1, ""},
+		{"SIOCSVNETID", Const, 3, ""},
+		{"SIOCZIFDATA", Const, 1, ""},
+		{"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1, ""},
+		{"SIO_GET_INTERFACE_LIST", Const, 0, ""},
+		{"SIO_KEEPALIVE_VALS", Const, 3, ""},
+		{"SIO_UDP_CONNRESET", Const, 4, ""},
+		{"SOCK_CLOEXEC", Const, 0, ""},
+		{"SOCK_DCCP", Const, 0, ""},
+		{"SOCK_DGRAM", Const, 0, ""},
+		{"SOCK_FLAGS_MASK", Const, 1, ""},
+		{"SOCK_MAXADDRLEN", Const, 0, ""},
+		{"SOCK_NONBLOCK", Const, 0, ""},
+		{"SOCK_NOSIGPIPE", Const, 1, ""},
+		{"SOCK_PACKET", Const, 0, ""},
+		{"SOCK_RAW", Const, 0, ""},
+		{"SOCK_RDM", Const, 0, ""},
+		{"SOCK_SEQPACKET", Const, 0, ""},
+		{"SOCK_STREAM", Const, 0, ""},
+		{"SOL_AAL", Const, 0, ""},
+		{"SOL_ATM", Const, 0, ""},
+		{"SOL_DECNET", Const, 0, ""},
+		{"SOL_ICMPV6", Const, 0, ""},
+		{"SOL_IP", Const, 0, ""},
+		{"SOL_IPV6", Const, 0, ""},
+		{"SOL_IRDA", Const, 0, ""},
+		{"SOL_PACKET", Const, 0, ""},
+		{"SOL_RAW", Const, 0, ""},
+		{"SOL_SOCKET", Const, 0, ""},
+		{"SOL_TCP", Const, 0, ""},
+		{"SOL_X25", Const, 0, ""},
+		{"SOMAXCONN", Const, 0, ""},
+		{"SO_ACCEPTCONN", Const, 0, ""},
+		{"SO_ACCEPTFILTER", Const, 0, ""},
+		{"SO_ATTACH_FILTER", Const, 0, ""},
+		{"SO_BINDANY", Const, 1, ""},
+		{"SO_BINDTODEVICE", Const, 0, ""},
+		{"SO_BINTIME", Const, 0, ""},
+		{"SO_BROADCAST", Const, 0, ""},
+		{"SO_BSDCOMPAT", Const, 0, ""},
+		{"SO_DEBUG", Const, 0, ""},
+		{"SO_DETACH_FILTER", Const, 0, ""},
+		{"SO_DOMAIN", Const, 0, ""},
+		{"SO_DONTROUTE", Const, 0, ""},
+		{"SO_DONTTRUNC", Const, 0, ""},
+		{"SO_ERROR", Const, 0, ""},
+		{"SO_KEEPALIVE", Const, 0, ""},
+		{"SO_LABEL", Const, 0, ""},
+		{"SO_LINGER", Const, 0, ""},
+		{"SO_LINGER_SEC", Const, 0, ""},
+		{"SO_LISTENINCQLEN", Const, 0, ""},
+		{"SO_LISTENQLEN", Const, 0, ""},
+		{"SO_LISTENQLIMIT", Const, 0, ""},
+		{"SO_MARK", Const, 0, ""},
+		{"SO_NETPROC", Const, 1, ""},
+		{"SO_NKE", Const, 0, ""},
+		{"SO_NOADDRERR", Const, 0, ""},
+		{"SO_NOHEADER", Const, 1, ""},
+		{"SO_NOSIGPIPE", Const, 0, ""},
+		{"SO_NOTIFYCONFLICT", Const, 0, ""},
+		{"SO_NO_CHECK", Const, 0, ""},
+		{"SO_NO_DDP", Const, 0, ""},
+		{"SO_NO_OFFLOAD", Const, 0, ""},
+		{"SO_NP_EXTENSIONS", Const, 0, ""},
+		{"SO_NREAD", Const, 0, ""},
+		{"SO_NUMRCVPKT", Const, 16, ""},
+		{"SO_NWRITE", Const, 0, ""},
+		{"SO_OOBINLINE", Const, 0, ""},
+		{"SO_OVERFLOWED", Const, 1, ""},
+		{"SO_PASSCRED", Const, 0, ""},
+		{"SO_PASSSEC", Const, 0, ""},
+		{"SO_PEERCRED", Const, 0, ""},
+		{"SO_PEERLABEL", Const, 0, ""},
+		{"SO_PEERNAME", Const, 0, ""},
+		{"SO_PEERSEC", Const, 0, ""},
+		{"SO_PRIORITY", Const, 0, ""},
+		{"SO_PROTOCOL", Const, 0, ""},
+		{"SO_PROTOTYPE", Const, 1, ""},
+		{"SO_RANDOMPORT", Const, 0, ""},
+		{"SO_RCVBUF", Const, 0, ""},
+		{"SO_RCVBUFFORCE", Const, 0, ""},
+		{"SO_RCVLOWAT", Const, 0, ""},
+		{"SO_RCVTIMEO", Const, 0, ""},
+		{"SO_RESTRICTIONS", Const, 0, ""},
+		{"SO_RESTRICT_DENYIN", Const, 0, ""},
+		{"SO_RESTRICT_DENYOUT", Const, 0, ""},
+		{"SO_RESTRICT_DENYSET", Const, 0, ""},
+		{"SO_REUSEADDR", Const, 0, ""},
+		{"SO_REUSEPORT", Const, 0, ""},
+		{"SO_REUSESHAREUID", Const, 0, ""},
+		{"SO_RTABLE", Const, 1, ""},
+		{"SO_RXQ_OVFL", Const, 0, ""},
+		{"SO_SECURITY_AUTHENTICATION", Const, 0, ""},
+		{"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0, ""},
+		{"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0, ""},
+		{"SO_SETFIB", Const, 0, ""},
+		{"SO_SNDBUF", Const, 0, ""},
+		{"SO_SNDBUFFORCE", Const, 0, ""},
+		{"SO_SNDLOWAT", Const, 0, ""},
+		{"SO_SNDTIMEO", Const, 0, ""},
+		{"SO_SPLICE", Const, 1, ""},
+		{"SO_TIMESTAMP", Const, 0, ""},
+		{"SO_TIMESTAMPING", Const, 0, ""},
+		{"SO_TIMESTAMPNS", Const, 0, ""},
+		{"SO_TIMESTAMP_MONOTONIC", Const, 0, ""},
+		{"SO_TYPE", Const, 0, ""},
+		{"SO_UPCALLCLOSEWAIT", Const, 0, ""},
+		{"SO_UPDATE_ACCEPT_CONTEXT", Const, 0, ""},
+		{"SO_UPDATE_CONNECT_CONTEXT", Const, 1, ""},
+		{"SO_USELOOPBACK", Const, 0, ""},
+		{"SO_USER_COOKIE", Const, 1, ""},
+		{"SO_VENDOR", Const, 3, ""},
+		{"SO_WANTMORE", Const, 0, ""},
+		{"SO_WANTOOBFLAG", Const, 0, ""},
+		{"SSLExtraCertChainPolicyPara", Type, 0, ""},
+		{"SSLExtraCertChainPolicyPara.AuthType", Field, 0, ""},
+		{"SSLExtraCertChainPolicyPara.Checks", Field, 0, ""},
+		{"SSLExtraCertChainPolicyPara.ServerName", Field, 0, ""},
+		{"SSLExtraCertChainPolicyPara.Size", Field, 0, ""},
+		{"STANDARD_RIGHTS_ALL", Const, 0, ""},
+		{"STANDARD_RIGHTS_EXECUTE", Const, 0, ""},
+		{"STANDARD_RIGHTS_READ", Const, 0, ""},
+		{"STANDARD_RIGHTS_REQUIRED", Const, 0, ""},
+		{"STANDARD_RIGHTS_WRITE", Const, 0, ""},
+		{"STARTF_USESHOWWINDOW", Const, 0, ""},
+		{"STARTF_USESTDHANDLES", Const, 0, ""},
+		{"STD_ERROR_HANDLE", Const, 0, ""},
+		{"STD_INPUT_HANDLE", Const, 0, ""},
+		{"STD_OUTPUT_HANDLE", Const, 0, ""},
+		{"SUBLANG_ENGLISH_US", Const, 0, ""},
+		{"SW_FORCEMINIMIZE", Const, 0, ""},
+		{"SW_HIDE", Const, 0, ""},
+		{"SW_MAXIMIZE", Const, 0, ""},
+		{"SW_MINIMIZE", Const, 0, ""},
+		{"SW_NORMAL", Const, 0, ""},
+		{"SW_RESTORE", Const, 0, ""},
+		{"SW_SHOW", Const, 0, ""},
+		{"SW_SHOWDEFAULT", Const, 0, ""},
+		{"SW_SHOWMAXIMIZED", Const, 0, ""},
+		{"SW_SHOWMINIMIZED", Const, 0, ""},
+		{"SW_SHOWMINNOACTIVE", Const, 0, ""},
+		{"SW_SHOWNA", Const, 0, ""},
+		{"SW_SHOWNOACTIVATE", Const, 0, ""},
+		{"SW_SHOWNORMAL", Const, 0, ""},
+		{"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4, ""},
+		{"SYNCHRONIZE", Const, 0, ""},
+		{"SYSCTL_VERSION", Const, 1, ""},
+		{"SYSCTL_VERS_0", Const, 1, ""},
+		{"SYSCTL_VERS_1", Const, 1, ""},
+		{"SYSCTL_VERS_MASK", Const, 1, ""},
+		{"SYS_ABORT2", Const, 0, ""},
+		{"SYS_ACCEPT", Const, 0, ""},
+		{"SYS_ACCEPT4", Const, 0, ""},
+		{"SYS_ACCEPT_NOCANCEL", Const, 0, ""},
+		{"SYS_ACCESS", Const, 0, ""},
+		{"SYS_ACCESS_EXTENDED", Const, 0, ""},
+		{"SYS_ACCT", Const, 0, ""},
+		{"SYS_ADD_KEY", Const, 0, ""},
+		{"SYS_ADD_PROFIL", Const, 0, ""},
+		{"SYS_ADJFREQ", Const, 1, ""},
+		{"SYS_ADJTIME", Const, 0, ""},
+		{"SYS_ADJTIMEX", Const, 0, ""},
+		{"SYS_AFS_SYSCALL", Const, 0, ""},
+		{"SYS_AIO_CANCEL", Const, 0, ""},
+		{"SYS_AIO_ERROR", Const, 0, ""},
+		{"SYS_AIO_FSYNC", Const, 0, ""},
+		{"SYS_AIO_MLOCK", Const, 14, ""},
+		{"SYS_AIO_READ", Const, 0, ""},
+		{"SYS_AIO_RETURN", Const, 0, ""},
+		{"SYS_AIO_SUSPEND", Const, 0, ""},
+		{"SYS_AIO_SUSPEND_NOCANCEL", Const, 0, ""},
+		{"SYS_AIO_WAITCOMPLETE", Const, 14, ""},
+		{"SYS_AIO_WRITE", Const, 0, ""},
+		{"SYS_ALARM", Const, 0, ""},
+		{"SYS_ARCH_PRCTL", Const, 0, ""},
+		{"SYS_ARM_FADVISE64_64", Const, 0, ""},
+		{"SYS_ARM_SYNC_FILE_RANGE", Const, 0, ""},
+		{"SYS_ATGETMSG", Const, 0, ""},
+		{"SYS_ATPGETREQ", Const, 0, ""},
+		{"SYS_ATPGETRSP", Const, 0, ""},
+		{"SYS_ATPSNDREQ", Const, 0, ""},
+		{"SYS_ATPSNDRSP", Const, 0, ""},
+		{"SYS_ATPUTMSG", Const, 0, ""},
+		{"SYS_ATSOCKET", Const, 0, ""},
+		{"SYS_AUDIT", Const, 0, ""},
+		{"SYS_AUDITCTL", Const, 0, ""},
+		{"SYS_AUDITON", Const, 0, ""},
+		{"SYS_AUDIT_SESSION_JOIN", Const, 0, ""},
+		{"SYS_AUDIT_SESSION_PORT", Const, 0, ""},
+		{"SYS_AUDIT_SESSION_SELF", Const, 0, ""},
+		{"SYS_BDFLUSH", Const, 0, ""},
+		{"SYS_BIND", Const, 0, ""},
+		{"SYS_BINDAT", Const, 3, ""},
+		{"SYS_BREAK", Const, 0, ""},
+		{"SYS_BRK", Const, 0, ""},
+		{"SYS_BSDTHREAD_CREATE", Const, 0, ""},
+		{"SYS_BSDTHREAD_REGISTER", Const, 0, ""},
+		{"SYS_BSDTHREAD_TERMINATE", Const, 0, ""},
+		{"SYS_CAPGET", Const, 0, ""},
+		{"SYS_CAPSET", Const, 0, ""},
+		{"SYS_CAP_ENTER", Const, 0, ""},
+		{"SYS_CAP_FCNTLS_GET", Const, 1, ""},
+		{"SYS_CAP_FCNTLS_LIMIT", Const, 1, ""},
+		{"SYS_CAP_GETMODE", Const, 0, ""},
+		{"SYS_CAP_GETRIGHTS", Const, 0, ""},
+		{"SYS_CAP_IOCTLS_GET", Const, 1, ""},
+		{"SYS_CAP_IOCTLS_LIMIT", Const, 1, ""},
+		{"SYS_CAP_NEW", Const, 0, ""},
+		{"SYS_CAP_RIGHTS_GET", Const, 1, ""},
+		{"SYS_CAP_RIGHTS_LIMIT", Const, 1, ""},
+		{"SYS_CHDIR", Const, 0, ""},
+		{"SYS_CHFLAGS", Const, 0, ""},
+		{"SYS_CHFLAGSAT", Const, 3, ""},
+		{"SYS_CHMOD", Const, 0, ""},
+		{"SYS_CHMOD_EXTENDED", Const, 0, ""},
+		{"SYS_CHOWN", Const, 0, ""},
+		{"SYS_CHOWN32", Const, 0, ""},
+		{"SYS_CHROOT", Const, 0, ""},
+		{"SYS_CHUD", Const, 0, ""},
+		{"SYS_CLOCK_ADJTIME", Const, 0, ""},
+		{"SYS_CLOCK_GETCPUCLOCKID2", Const, 1, ""},
+		{"SYS_CLOCK_GETRES", Const, 0, ""},
+		{"SYS_CLOCK_GETTIME", Const, 0, ""},
+		{"SYS_CLOCK_NANOSLEEP", Const, 0, ""},
+		{"SYS_CLOCK_SETTIME", Const, 0, ""},
+		{"SYS_CLONE", Const, 0, ""},
+		{"SYS_CLOSE", Const, 0, ""},
+		{"SYS_CLOSEFROM", Const, 0, ""},
+		{"SYS_CLOSE_NOCANCEL", Const, 0, ""},
+		{"SYS_CONNECT", Const, 0, ""},
+		{"SYS_CONNECTAT", Const, 3, ""},
+		{"SYS_CONNECT_NOCANCEL", Const, 0, ""},
+		{"SYS_COPYFILE", Const, 0, ""},
+		{"SYS_CPUSET", Const, 0, ""},
+		{"SYS_CPUSET_GETAFFINITY", Const, 0, ""},
+		{"SYS_CPUSET_GETID", Const, 0, ""},
+		{"SYS_CPUSET_SETAFFINITY", Const, 0, ""},
+		{"SYS_CPUSET_SETID", Const, 0, ""},
+		{"SYS_CREAT", Const, 0, ""},
+		{"SYS_CREATE_MODULE", Const, 0, ""},
+		{"SYS_CSOPS", Const, 0, ""},
+		{"SYS_CSOPS_AUDITTOKEN", Const, 16, ""},
+		{"SYS_DELETE", Const, 0, ""},
+		{"SYS_DELETE_MODULE", Const, 0, ""},
+		{"SYS_DUP", Const, 0, ""},
+		{"SYS_DUP2", Const, 0, ""},
+		{"SYS_DUP3", Const, 0, ""},
+		{"SYS_EACCESS", Const, 0, ""},
+		{"SYS_EPOLL_CREATE", Const, 0, ""},
+		{"SYS_EPOLL_CREATE1", Const, 0, ""},
+		{"SYS_EPOLL_CTL", Const, 0, ""},
+		{"SYS_EPOLL_CTL_OLD", Const, 0, ""},
+		{"SYS_EPOLL_PWAIT", Const, 0, ""},
+		{"SYS_EPOLL_WAIT", Const, 0, ""},
+		{"SYS_EPOLL_WAIT_OLD", Const, 0, ""},
+		{"SYS_EVENTFD", Const, 0, ""},
+		{"SYS_EVENTFD2", Const, 0, ""},
+		{"SYS_EXCHANGEDATA", Const, 0, ""},
+		{"SYS_EXECVE", Const, 0, ""},
+		{"SYS_EXIT", Const, 0, ""},
+		{"SYS_EXIT_GROUP", Const, 0, ""},
+		{"SYS_EXTATTRCTL", Const, 0, ""},
+		{"SYS_EXTATTR_DELETE_FD", Const, 0, ""},
+		{"SYS_EXTATTR_DELETE_FILE", Const, 0, ""},
+		{"SYS_EXTATTR_DELETE_LINK", Const, 0, ""},
+		{"SYS_EXTATTR_GET_FD", Const, 0, ""},
+		{"SYS_EXTATTR_GET_FILE", Const, 0, ""},
+		{"SYS_EXTATTR_GET_LINK", Const, 0, ""},
+		{"SYS_EXTATTR_LIST_FD", Const, 0, ""},
+		{"SYS_EXTATTR_LIST_FILE", Const, 0, ""},
+		{"SYS_EXTATTR_LIST_LINK", Const, 0, ""},
+		{"SYS_EXTATTR_SET_FD", Const, 0, ""},
+		{"SYS_EXTATTR_SET_FILE", Const, 0, ""},
+		{"SYS_EXTATTR_SET_LINK", Const, 0, ""},
+		{"SYS_FACCESSAT", Const, 0, ""},
+		{"SYS_FADVISE64", Const, 0, ""},
+		{"SYS_FADVISE64_64", Const, 0, ""},
+		{"SYS_FALLOCATE", Const, 0, ""},
+		{"SYS_FANOTIFY_INIT", Const, 0, ""},
+		{"SYS_FANOTIFY_MARK", Const, 0, ""},
+		{"SYS_FCHDIR", Const, 0, ""},
+		{"SYS_FCHFLAGS", Const, 0, ""},
+		{"SYS_FCHMOD", Const, 0, ""},
+		{"SYS_FCHMODAT", Const, 0, ""},
+		{"SYS_FCHMOD_EXTENDED", Const, 0, ""},
+		{"SYS_FCHOWN", Const, 0, ""},
+		{"SYS_FCHOWN32", Const, 0, ""},
+		{"SYS_FCHOWNAT", Const, 0, ""},
+		{"SYS_FCHROOT", Const, 1, ""},
+		{"SYS_FCNTL", Const, 0, ""},
+		{"SYS_FCNTL64", Const, 0, ""},
+		{"SYS_FCNTL_NOCANCEL", Const, 0, ""},
+		{"SYS_FDATASYNC", Const, 0, ""},
+		{"SYS_FEXECVE", Const, 0, ""},
+		{"SYS_FFCLOCK_GETCOUNTER", Const, 0, ""},
+		{"SYS_FFCLOCK_GETESTIMATE", Const, 0, ""},
+		{"SYS_FFCLOCK_SETESTIMATE", Const, 0, ""},
+		{"SYS_FFSCTL", Const, 0, ""},
+		{"SYS_FGETATTRLIST", Const, 0, ""},
+		{"SYS_FGETXATTR", Const, 0, ""},
+		{"SYS_FHOPEN", Const, 0, ""},
+		{"SYS_FHSTAT", Const, 0, ""},
+		{"SYS_FHSTATFS", Const, 0, ""},
+		{"SYS_FILEPORT_MAKEFD", Const, 0, ""},
+		{"SYS_FILEPORT_MAKEPORT", Const, 0, ""},
+		{"SYS_FKTRACE", Const, 1, ""},
+		{"SYS_FLISTXATTR", Const, 0, ""},
+		{"SYS_FLOCK", Const, 0, ""},
+		{"SYS_FORK", Const, 0, ""},
+		{"SYS_FPATHCONF", Const, 0, ""},
+		{"SYS_FREEBSD6_FTRUNCATE", Const, 0, ""},
+		{"SYS_FREEBSD6_LSEEK", Const, 0, ""},
+		{"SYS_FREEBSD6_MMAP", Const, 0, ""},
+		{"SYS_FREEBSD6_PREAD", Const, 0, ""},
+		{"SYS_FREEBSD6_PWRITE", Const, 0, ""},
+		{"SYS_FREEBSD6_TRUNCATE", Const, 0, ""},
+		{"SYS_FREMOVEXATTR", Const, 0, ""},
+		{"SYS_FSCTL", Const, 0, ""},
+		{"SYS_FSETATTRLIST", Const, 0, ""},
+		{"SYS_FSETXATTR", Const, 0, ""},
+		{"SYS_FSGETPATH", Const, 0, ""},
+		{"SYS_FSTAT", Const, 0, ""},
+		{"SYS_FSTAT64", Const, 0, ""},
+		{"SYS_FSTAT64_EXTENDED", Const, 0, ""},
+		{"SYS_FSTATAT", Const, 0, ""},
+		{"SYS_FSTATAT64", Const, 0, ""},
+		{"SYS_FSTATFS", Const, 0, ""},
+		{"SYS_FSTATFS64", Const, 0, ""},
+		{"SYS_FSTATV", Const, 0, ""},
+		{"SYS_FSTATVFS1", Const, 1, ""},
+		{"SYS_FSTAT_EXTENDED", Const, 0, ""},
+		{"SYS_FSYNC", Const, 0, ""},
+		{"SYS_FSYNC_NOCANCEL", Const, 0, ""},
+		{"SYS_FSYNC_RANGE", Const, 1, ""},
+		{"SYS_FTIME", Const, 0, ""},
+		{"SYS_FTRUNCATE", Const, 0, ""},
+		{"SYS_FTRUNCATE64", Const, 0, ""},
+		{"SYS_FUTEX", Const, 0, ""},
+		{"SYS_FUTIMENS", Const, 1, ""},
+		{"SYS_FUTIMES", Const, 0, ""},
+		{"SYS_FUTIMESAT", Const, 0, ""},
+		{"SYS_GETATTRLIST", Const, 0, ""},
+		{"SYS_GETAUDIT", Const, 0, ""},
+		{"SYS_GETAUDIT_ADDR", Const, 0, ""},
+		{"SYS_GETAUID", Const, 0, ""},
+		{"SYS_GETCONTEXT", Const, 0, ""},
+		{"SYS_GETCPU", Const, 0, ""},
+		{"SYS_GETCWD", Const, 0, ""},
+		{"SYS_GETDENTS", Const, 0, ""},
+		{"SYS_GETDENTS64", Const, 0, ""},
+		{"SYS_GETDIRENTRIES", Const, 0, ""},
+		{"SYS_GETDIRENTRIES64", Const, 0, ""},
+		{"SYS_GETDIRENTRIESATTR", Const, 0, ""},
+		{"SYS_GETDTABLECOUNT", Const, 1, ""},
+		{"SYS_GETDTABLESIZE", Const, 0, ""},
+		{"SYS_GETEGID", Const, 0, ""},
+		{"SYS_GETEGID32", Const, 0, ""},
+		{"SYS_GETEUID", Const, 0, ""},
+		{"SYS_GETEUID32", Const, 0, ""},
+		{"SYS_GETFH", Const, 0, ""},
+		{"SYS_GETFSSTAT", Const, 0, ""},
+		{"SYS_GETFSSTAT64", Const, 0, ""},
+		{"SYS_GETGID", Const, 0, ""},
+		{"SYS_GETGID32", Const, 0, ""},
+		{"SYS_GETGROUPS", Const, 0, ""},
+		{"SYS_GETGROUPS32", Const, 0, ""},
+		{"SYS_GETHOSTUUID", Const, 0, ""},
+		{"SYS_GETITIMER", Const, 0, ""},
+		{"SYS_GETLCID", Const, 0, ""},
+		{"SYS_GETLOGIN", Const, 0, ""},
+		{"SYS_GETLOGINCLASS", Const, 0, ""},
+		{"SYS_GETPEERNAME", Const, 0, ""},
+		{"SYS_GETPGID", Const, 0, ""},
+		{"SYS_GETPGRP", Const, 0, ""},
+		{"SYS_GETPID", Const, 0, ""},
+		{"SYS_GETPMSG", Const, 0, ""},
+		{"SYS_GETPPID", Const, 0, ""},
+		{"SYS_GETPRIORITY", Const, 0, ""},
+		{"SYS_GETRESGID", Const, 0, ""},
+		{"SYS_GETRESGID32", Const, 0, ""},
+		{"SYS_GETRESUID", Const, 0, ""},
+		{"SYS_GETRESUID32", Const, 0, ""},
+		{"SYS_GETRLIMIT", Const, 0, ""},
+		{"SYS_GETRTABLE", Const, 1, ""},
+		{"SYS_GETRUSAGE", Const, 0, ""},
+		{"SYS_GETSGROUPS", Const, 0, ""},
+		{"SYS_GETSID", Const, 0, ""},
+		{"SYS_GETSOCKNAME", Const, 0, ""},
+		{"SYS_GETSOCKOPT", Const, 0, ""},
+		{"SYS_GETTHRID", Const, 1, ""},
+		{"SYS_GETTID", Const, 0, ""},
+		{"SYS_GETTIMEOFDAY", Const, 0, ""},
+		{"SYS_GETUID", Const, 0, ""},
+		{"SYS_GETUID32", Const, 0, ""},
+		{"SYS_GETVFSSTAT", Const, 1, ""},
+		{"SYS_GETWGROUPS", Const, 0, ""},
+		{"SYS_GETXATTR", Const, 0, ""},
+		{"SYS_GET_KERNEL_SYMS", Const, 0, ""},
+		{"SYS_GET_MEMPOLICY", Const, 0, ""},
+		{"SYS_GET_ROBUST_LIST", Const, 0, ""},
+		{"SYS_GET_THREAD_AREA", Const, 0, ""},
+		{"SYS_GSSD_SYSCALL", Const, 14, ""},
+		{"SYS_GTTY", Const, 0, ""},
+		{"SYS_IDENTITYSVC", Const, 0, ""},
+		{"SYS_IDLE", Const, 0, ""},
+		{"SYS_INITGROUPS", Const, 0, ""},
+		{"SYS_INIT_MODULE", Const, 0, ""},
+		{"SYS_INOTIFY_ADD_WATCH", Const, 0, ""},
+		{"SYS_INOTIFY_INIT", Const, 0, ""},
+		{"SYS_INOTIFY_INIT1", Const, 0, ""},
+		{"SYS_INOTIFY_RM_WATCH", Const, 0, ""},
+		{"SYS_IOCTL", Const, 0, ""},
+		{"SYS_IOPERM", Const, 0, ""},
+		{"SYS_IOPL", Const, 0, ""},
+		{"SYS_IOPOLICYSYS", Const, 0, ""},
+		{"SYS_IOPRIO_GET", Const, 0, ""},
+		{"SYS_IOPRIO_SET", Const, 0, ""},
+		{"SYS_IO_CANCEL", Const, 0, ""},
+		{"SYS_IO_DESTROY", Const, 0, ""},
+		{"SYS_IO_GETEVENTS", Const, 0, ""},
+		{"SYS_IO_SETUP", Const, 0, ""},
+		{"SYS_IO_SUBMIT", Const, 0, ""},
+		{"SYS_IPC", Const, 0, ""},
+		{"SYS_ISSETUGID", Const, 0, ""},
+		{"SYS_JAIL", Const, 0, ""},
+		{"SYS_JAIL_ATTACH", Const, 0, ""},
+		{"SYS_JAIL_GET", Const, 0, ""},
+		{"SYS_JAIL_REMOVE", Const, 0, ""},
+		{"SYS_JAIL_SET", Const, 0, ""},
+		{"SYS_KAS_INFO", Const, 16, ""},
+		{"SYS_KDEBUG_TRACE", Const, 0, ""},
+		{"SYS_KENV", Const, 0, ""},
+		{"SYS_KEVENT", Const, 0, ""},
+		{"SYS_KEVENT64", Const, 0, ""},
+		{"SYS_KEXEC_LOAD", Const, 0, ""},
+		{"SYS_KEYCTL", Const, 0, ""},
+		{"SYS_KILL", Const, 0, ""},
+		{"SYS_KLDFIND", Const, 0, ""},
+		{"SYS_KLDFIRSTMOD", Const, 0, ""},
+		{"SYS_KLDLOAD", Const, 0, ""},
+		{"SYS_KLDNEXT", Const, 0, ""},
+		{"SYS_KLDSTAT", Const, 0, ""},
+		{"SYS_KLDSYM", Const, 0, ""},
+		{"SYS_KLDUNLOAD", Const, 0, ""},
+		{"SYS_KLDUNLOADF", Const, 0, ""},
+		{"SYS_KMQ_NOTIFY", Const, 14, ""},
+		{"SYS_KMQ_OPEN", Const, 14, ""},
+		{"SYS_KMQ_SETATTR", Const, 14, ""},
+		{"SYS_KMQ_TIMEDRECEIVE", Const, 14, ""},
+		{"SYS_KMQ_TIMEDSEND", Const, 14, ""},
+		{"SYS_KMQ_UNLINK", Const, 14, ""},
+		{"SYS_KQUEUE", Const, 0, ""},
+		{"SYS_KQUEUE1", Const, 1, ""},
+		{"SYS_KSEM_CLOSE", Const, 14, ""},
+		{"SYS_KSEM_DESTROY", Const, 14, ""},
+		{"SYS_KSEM_GETVALUE", Const, 14, ""},
+		{"SYS_KSEM_INIT", Const, 14, ""},
+		{"SYS_KSEM_OPEN", Const, 14, ""},
+		{"SYS_KSEM_POST", Const, 14, ""},
+		{"SYS_KSEM_TIMEDWAIT", Const, 14, ""},
+		{"SYS_KSEM_TRYWAIT", Const, 14, ""},
+		{"SYS_KSEM_UNLINK", Const, 14, ""},
+		{"SYS_KSEM_WAIT", Const, 14, ""},
+		{"SYS_KTIMER_CREATE", Const, 0, ""},
+		{"SYS_KTIMER_DELETE", Const, 0, ""},
+		{"SYS_KTIMER_GETOVERRUN", Const, 0, ""},
+		{"SYS_KTIMER_GETTIME", Const, 0, ""},
+		{"SYS_KTIMER_SETTIME", Const, 0, ""},
+		{"SYS_KTRACE", Const, 0, ""},
+		{"SYS_LCHFLAGS", Const, 0, ""},
+		{"SYS_LCHMOD", Const, 0, ""},
+		{"SYS_LCHOWN", Const, 0, ""},
+		{"SYS_LCHOWN32", Const, 0, ""},
+		{"SYS_LEDGER", Const, 16, ""},
+		{"SYS_LGETFH", Const, 0, ""},
+		{"SYS_LGETXATTR", Const, 0, ""},
+		{"SYS_LINK", Const, 0, ""},
+		{"SYS_LINKAT", Const, 0, ""},
+		{"SYS_LIO_LISTIO", Const, 0, ""},
+		{"SYS_LISTEN", Const, 0, ""},
+		{"SYS_LISTXATTR", Const, 0, ""},
+		{"SYS_LLISTXATTR", Const, 0, ""},
+		{"SYS_LOCK", Const, 0, ""},
+		{"SYS_LOOKUP_DCOOKIE", Const, 0, ""},
+		{"SYS_LPATHCONF", Const, 0, ""},
+		{"SYS_LREMOVEXATTR", Const, 0, ""},
+		{"SYS_LSEEK", Const, 0, ""},
+		{"SYS_LSETXATTR", Const, 0, ""},
+		{"SYS_LSTAT", Const, 0, ""},
+		{"SYS_LSTAT64", Const, 0, ""},
+		{"SYS_LSTAT64_EXTENDED", Const, 0, ""},
+		{"SYS_LSTATV", Const, 0, ""},
+		{"SYS_LSTAT_EXTENDED", Const, 0, ""},
+		{"SYS_LUTIMES", Const, 0, ""},
+		{"SYS_MAC_SYSCALL", Const, 0, ""},
+		{"SYS_MADVISE", Const, 0, ""},
+		{"SYS_MADVISE1", Const, 0, ""},
+		{"SYS_MAXSYSCALL", Const, 0, ""},
+		{"SYS_MBIND", Const, 0, ""},
+		{"SYS_MIGRATE_PAGES", Const, 0, ""},
+		{"SYS_MINCORE", Const, 0, ""},
+		{"SYS_MINHERIT", Const, 0, ""},
+		{"SYS_MKCOMPLEX", Const, 0, ""},
+		{"SYS_MKDIR", Const, 0, ""},
+		{"SYS_MKDIRAT", Const, 0, ""},
+		{"SYS_MKDIR_EXTENDED", Const, 0, ""},
+		{"SYS_MKFIFO", Const, 0, ""},
+		{"SYS_MKFIFOAT", Const, 0, ""},
+		{"SYS_MKFIFO_EXTENDED", Const, 0, ""},
+		{"SYS_MKNOD", Const, 0, ""},
+		{"SYS_MKNODAT", Const, 0, ""},
+		{"SYS_MLOCK", Const, 0, ""},
+		{"SYS_MLOCKALL", Const, 0, ""},
+		{"SYS_MMAP", Const, 0, ""},
+		{"SYS_MMAP2", Const, 0, ""},
+		{"SYS_MODCTL", Const, 1, ""},
+		{"SYS_MODFIND", Const, 0, ""},
+		{"SYS_MODFNEXT", Const, 0, ""},
+		{"SYS_MODIFY_LDT", Const, 0, ""},
+		{"SYS_MODNEXT", Const, 0, ""},
+		{"SYS_MODSTAT", Const, 0, ""},
+		{"SYS_MODWATCH", Const, 0, ""},
+		{"SYS_MOUNT", Const, 0, ""},
+		{"SYS_MOVE_PAGES", Const, 0, ""},
+		{"SYS_MPROTECT", Const, 0, ""},
+		{"SYS_MPX", Const, 0, ""},
+		{"SYS_MQUERY", Const, 1, ""},
+		{"SYS_MQ_GETSETATTR", Const, 0, ""},
+		{"SYS_MQ_NOTIFY", Const, 0, ""},
+		{"SYS_MQ_OPEN", Const, 0, ""},
+		{"SYS_MQ_TIMEDRECEIVE", Const, 0, ""},
+		{"SYS_MQ_TIMEDSEND", Const, 0, ""},
+		{"SYS_MQ_UNLINK", Const, 0, ""},
+		{"SYS_MREMAP", Const, 0, ""},
+		{"SYS_MSGCTL", Const, 0, ""},
+		{"SYS_MSGGET", Const, 0, ""},
+		{"SYS_MSGRCV", Const, 0, ""},
+		{"SYS_MSGRCV_NOCANCEL", Const, 0, ""},
+		{"SYS_MSGSND", Const, 0, ""},
+		{"SYS_MSGSND_NOCANCEL", Const, 0, ""},
+		{"SYS_MSGSYS", Const, 0, ""},
+		{"SYS_MSYNC", Const, 0, ""},
+		{"SYS_MSYNC_NOCANCEL", Const, 0, ""},
+		{"SYS_MUNLOCK", Const, 0, ""},
+		{"SYS_MUNLOCKALL", Const, 0, ""},
+		{"SYS_MUNMAP", Const, 0, ""},
+		{"SYS_NAME_TO_HANDLE_AT", Const, 0, ""},
+		{"SYS_NANOSLEEP", Const, 0, ""},
+		{"SYS_NEWFSTATAT", Const, 0, ""},
+		{"SYS_NFSCLNT", Const, 0, ""},
+		{"SYS_NFSSERVCTL", Const, 0, ""},
+		{"SYS_NFSSVC", Const, 0, ""},
+		{"SYS_NFSTAT", Const, 0, ""},
+		{"SYS_NICE", Const, 0, ""},
+		{"SYS_NLM_SYSCALL", Const, 14, ""},
+		{"SYS_NLSTAT", Const, 0, ""},
+		{"SYS_NMOUNT", Const, 0, ""},
+		{"SYS_NSTAT", Const, 0, ""},
+		{"SYS_NTP_ADJTIME", Const, 0, ""},
+		{"SYS_NTP_GETTIME", Const, 0, ""},
+		{"SYS_NUMA_GETAFFINITY", Const, 14, ""},
+		{"SYS_NUMA_SETAFFINITY", Const, 14, ""},
+		{"SYS_OABI_SYSCALL_BASE", Const, 0, ""},
+		{"SYS_OBREAK", Const, 0, ""},
+		{"SYS_OLDFSTAT", Const, 0, ""},
+		{"SYS_OLDLSTAT", Const, 0, ""},
+		{"SYS_OLDOLDUNAME", Const, 0, ""},
+		{"SYS_OLDSTAT", Const, 0, ""},
+		{"SYS_OLDUNAME", Const, 0, ""},
+		{"SYS_OPEN", Const, 0, ""},
+		{"SYS_OPENAT", Const, 0, ""},
+		{"SYS_OPENBSD_POLL", Const, 0, ""},
+		{"SYS_OPEN_BY_HANDLE_AT", Const, 0, ""},
+		{"SYS_OPEN_DPROTECTED_NP", Const, 16, ""},
+		{"SYS_OPEN_EXTENDED", Const, 0, ""},
+		{"SYS_OPEN_NOCANCEL", Const, 0, ""},
+		{"SYS_OVADVISE", Const, 0, ""},
+		{"SYS_PACCEPT", Const, 1, ""},
+		{"SYS_PATHCONF", Const, 0, ""},
+		{"SYS_PAUSE", Const, 0, ""},
+		{"SYS_PCICONFIG_IOBASE", Const, 0, ""},
+		{"SYS_PCICONFIG_READ", Const, 0, ""},
+		{"SYS_PCICONFIG_WRITE", Const, 0, ""},
+		{"SYS_PDFORK", Const, 0, ""},
+		{"SYS_PDGETPID", Const, 0, ""},
+		{"SYS_PDKILL", Const, 0, ""},
+		{"SYS_PERF_EVENT_OPEN", Const, 0, ""},
+		{"SYS_PERSONALITY", Const, 0, ""},
+		{"SYS_PID_HIBERNATE", Const, 0, ""},
+		{"SYS_PID_RESUME", Const, 0, ""},
+		{"SYS_PID_SHUTDOWN_SOCKETS", Const, 0, ""},
+		{"SYS_PID_SUSPEND", Const, 0, ""},
+		{"SYS_PIPE", Const, 0, ""},
+		{"SYS_PIPE2", Const, 0, ""},
+		{"SYS_PIVOT_ROOT", Const, 0, ""},
+		{"SYS_PMC_CONTROL", Const, 1, ""},
+		{"SYS_PMC_GET_INFO", Const, 1, ""},
+		{"SYS_POLL", Const, 0, ""},
+		{"SYS_POLLTS", Const, 1, ""},
+		{"SYS_POLL_NOCANCEL", Const, 0, ""},
+		{"SYS_POSIX_FADVISE", Const, 0, ""},
+		{"SYS_POSIX_FALLOCATE", Const, 0, ""},
+		{"SYS_POSIX_OPENPT", Const, 0, ""},
+		{"SYS_POSIX_SPAWN", Const, 0, ""},
+		{"SYS_PPOLL", Const, 0, ""},
+		{"SYS_PRCTL", Const, 0, ""},
+		{"SYS_PREAD", Const, 0, ""},
+		{"SYS_PREAD64", Const, 0, ""},
+		{"SYS_PREADV", Const, 0, ""},
+		{"SYS_PREAD_NOCANCEL", Const, 0, ""},
+		{"SYS_PRLIMIT64", Const, 0, ""},
+		{"SYS_PROCCTL", Const, 3, ""},
+		{"SYS_PROCESS_POLICY", Const, 0, ""},
+		{"SYS_PROCESS_VM_READV", Const, 0, ""},
+		{"SYS_PROCESS_VM_WRITEV", Const, 0, ""},
+		{"SYS_PROC_INFO", Const, 0, ""},
+		{"SYS_PROF", Const, 0, ""},
+		{"SYS_PROFIL", Const, 0, ""},
+		{"SYS_PSELECT", Const, 0, ""},
+		{"SYS_PSELECT6", Const, 0, ""},
+		{"SYS_PSET_ASSIGN", Const, 1, ""},
+		{"SYS_PSET_CREATE", Const, 1, ""},
+		{"SYS_PSET_DESTROY", Const, 1, ""},
+		{"SYS_PSYNCH_CVBROAD", Const, 0, ""},
+		{"SYS_PSYNCH_CVCLRPREPOST", Const, 0, ""},
+		{"SYS_PSYNCH_CVSIGNAL", Const, 0, ""},
+		{"SYS_PSYNCH_CVWAIT", Const, 0, ""},
+		{"SYS_PSYNCH_MUTEXDROP", Const, 0, ""},
+		{"SYS_PSYNCH_MUTEXWAIT", Const, 0, ""},
+		{"SYS_PSYNCH_RW_DOWNGRADE", Const, 0, ""},
+		{"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0, ""},
+		{"SYS_PSYNCH_RW_RDLOCK", Const, 0, ""},
+		{"SYS_PSYNCH_RW_UNLOCK", Const, 0, ""},
+		{"SYS_PSYNCH_RW_UNLOCK2", Const, 0, ""},
+		{"SYS_PSYNCH_RW_UPGRADE", Const, 0, ""},
+		{"SYS_PSYNCH_RW_WRLOCK", Const, 0, ""},
+		{"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0, ""},
+		{"SYS_PTRACE", Const, 0, ""},
+		{"SYS_PUTPMSG", Const, 0, ""},
+		{"SYS_PWRITE", Const, 0, ""},
+		{"SYS_PWRITE64", Const, 0, ""},
+		{"SYS_PWRITEV", Const, 0, ""},
+		{"SYS_PWRITE_NOCANCEL", Const, 0, ""},
+		{"SYS_QUERY_MODULE", Const, 0, ""},
+		{"SYS_QUOTACTL", Const, 0, ""},
+		{"SYS_RASCTL", Const, 1, ""},
+		{"SYS_RCTL_ADD_RULE", Const, 0, ""},
+		{"SYS_RCTL_GET_LIMITS", Const, 0, ""},
+		{"SYS_RCTL_GET_RACCT", Const, 0, ""},
+		{"SYS_RCTL_GET_RULES", Const, 0, ""},
+		{"SYS_RCTL_REMOVE_RULE", Const, 0, ""},
+		{"SYS_READ", Const, 0, ""},
+		{"SYS_READAHEAD", Const, 0, ""},
+		{"SYS_READDIR", Const, 0, ""},
+		{"SYS_READLINK", Const, 0, ""},
+		{"SYS_READLINKAT", Const, 0, ""},
+		{"SYS_READV", Const, 0, ""},
+		{"SYS_READV_NOCANCEL", Const, 0, ""},
+		{"SYS_READ_NOCANCEL", Const, 0, ""},
+		{"SYS_REBOOT", Const, 0, ""},
+		{"SYS_RECV", Const, 0, ""},
+		{"SYS_RECVFROM", Const, 0, ""},
+		{"SYS_RECVFROM_NOCANCEL", Const, 0, ""},
+		{"SYS_RECVMMSG", Const, 0, ""},
+		{"SYS_RECVMSG", Const, 0, ""},
+		{"SYS_RECVMSG_NOCANCEL", Const, 0, ""},
+		{"SYS_REMAP_FILE_PAGES", Const, 0, ""},
+		{"SYS_REMOVEXATTR", Const, 0, ""},
+		{"SYS_RENAME", Const, 0, ""},
+		{"SYS_RENAMEAT", Const, 0, ""},
+		{"SYS_REQUEST_KEY", Const, 0, ""},
+		{"SYS_RESTART_SYSCALL", Const, 0, ""},
+		{"SYS_REVOKE", Const, 0, ""},
+		{"SYS_RFORK", Const, 0, ""},
+		{"SYS_RMDIR", Const, 0, ""},
+		{"SYS_RTPRIO", Const, 0, ""},
+		{"SYS_RTPRIO_THREAD", Const, 0, ""},
+		{"SYS_RT_SIGACTION", Const, 0, ""},
+		{"SYS_RT_SIGPENDING", Const, 0, ""},
+		{"SYS_RT_SIGPROCMASK", Const, 0, ""},
+		{"SYS_RT_SIGQUEUEINFO", Const, 0, ""},
+		{"SYS_RT_SIGRETURN", Const, 0, ""},
+		{"SYS_RT_SIGSUSPEND", Const, 0, ""},
+		{"SYS_RT_SIGTIMEDWAIT", Const, 0, ""},
+		{"SYS_RT_TGSIGQUEUEINFO", Const, 0, ""},
+		{"SYS_SBRK", Const, 0, ""},
+		{"SYS_SCHED_GETAFFINITY", Const, 0, ""},
+		{"SYS_SCHED_GETPARAM", Const, 0, ""},
+		{"SYS_SCHED_GETSCHEDULER", Const, 0, ""},
+		{"SYS_SCHED_GET_PRIORITY_MAX", Const, 0, ""},
+		{"SYS_SCHED_GET_PRIORITY_MIN", Const, 0, ""},
+		{"SYS_SCHED_RR_GET_INTERVAL", Const, 0, ""},
+		{"SYS_SCHED_SETAFFINITY", Const, 0, ""},
+		{"SYS_SCHED_SETPARAM", Const, 0, ""},
+		{"SYS_SCHED_SETSCHEDULER", Const, 0, ""},
+		{"SYS_SCHED_YIELD", Const, 0, ""},
+		{"SYS_SCTP_GENERIC_RECVMSG", Const, 0, ""},
+		{"SYS_SCTP_GENERIC_SENDMSG", Const, 0, ""},
+		{"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0, ""},
+		{"SYS_SCTP_PEELOFF", Const, 0, ""},
+		{"SYS_SEARCHFS", Const, 0, ""},
+		{"SYS_SECURITY", Const, 0, ""},
+		{"SYS_SELECT", Const, 0, ""},
+		{"SYS_SELECT_NOCANCEL", Const, 0, ""},
+		{"SYS_SEMCONFIG", Const, 1, ""},
+		{"SYS_SEMCTL", Const, 0, ""},
+		{"SYS_SEMGET", Const, 0, ""},
+		{"SYS_SEMOP", Const, 0, ""},
+		{"SYS_SEMSYS", Const, 0, ""},
+		{"SYS_SEMTIMEDOP", Const, 0, ""},
+		{"SYS_SEM_CLOSE", Const, 0, ""},
+		{"SYS_SEM_DESTROY", Const, 0, ""},
+		{"SYS_SEM_GETVALUE", Const, 0, ""},
+		{"SYS_SEM_INIT", Const, 0, ""},
+		{"SYS_SEM_OPEN", Const, 0, ""},
+		{"SYS_SEM_POST", Const, 0, ""},
+		{"SYS_SEM_TRYWAIT", Const, 0, ""},
+		{"SYS_SEM_UNLINK", Const, 0, ""},
+		{"SYS_SEM_WAIT", Const, 0, ""},
+		{"SYS_SEM_WAIT_NOCANCEL", Const, 0, ""},
+		{"SYS_SEND", Const, 0, ""},
+		{"SYS_SENDFILE", Const, 0, ""},
+		{"SYS_SENDFILE64", Const, 0, ""},
+		{"SYS_SENDMMSG", Const, 0, ""},
+		{"SYS_SENDMSG", Const, 0, ""},
+		{"SYS_SENDMSG_NOCANCEL", Const, 0, ""},
+		{"SYS_SENDTO", Const, 0, ""},
+		{"SYS_SENDTO_NOCANCEL", Const, 0, ""},
+		{"SYS_SETATTRLIST", Const, 0, ""},
+		{"SYS_SETAUDIT", Const, 0, ""},
+		{"SYS_SETAUDIT_ADDR", Const, 0, ""},
+		{"SYS_SETAUID", Const, 0, ""},
+		{"SYS_SETCONTEXT", Const, 0, ""},
+		{"SYS_SETDOMAINNAME", Const, 0, ""},
+		{"SYS_SETEGID", Const, 0, ""},
+		{"SYS_SETEUID", Const, 0, ""},
+		{"SYS_SETFIB", Const, 0, ""},
+		{"SYS_SETFSGID", Const, 0, ""},
+		{"SYS_SETFSGID32", Const, 0, ""},
+		{"SYS_SETFSUID", Const, 0, ""},
+		{"SYS_SETFSUID32", Const, 0, ""},
+		{"SYS_SETGID", Const, 0, ""},
+		{"SYS_SETGID32", Const, 0, ""},
+		{"SYS_SETGROUPS", Const, 0, ""},
+		{"SYS_SETGROUPS32", Const, 0, ""},
+		{"SYS_SETHOSTNAME", Const, 0, ""},
+		{"SYS_SETITIMER", Const, 0, ""},
+		{"SYS_SETLCID", Const, 0, ""},
+		{"SYS_SETLOGIN", Const, 0, ""},
+		{"SYS_SETLOGINCLASS", Const, 0, ""},
+		{"SYS_SETNS", Const, 0, ""},
+		{"SYS_SETPGID", Const, 0, ""},
+		{"SYS_SETPRIORITY", Const, 0, ""},
+		{"SYS_SETPRIVEXEC", Const, 0, ""},
+		{"SYS_SETREGID", Const, 0, ""},
+		{"SYS_SETREGID32", Const, 0, ""},
+		{"SYS_SETRESGID", Const, 0, ""},
+		{"SYS_SETRESGID32", Const, 0, ""},
+		{"SYS_SETRESUID", Const, 0, ""},
+		{"SYS_SETRESUID32", Const, 0, ""},
+		{"SYS_SETREUID", Const, 0, ""},
+		{"SYS_SETREUID32", Const, 0, ""},
+		{"SYS_SETRLIMIT", Const, 0, ""},
+		{"SYS_SETRTABLE", Const, 1, ""},
+		{"SYS_SETSGROUPS", Const, 0, ""},
+		{"SYS_SETSID", Const, 0, ""},
+		{"SYS_SETSOCKOPT", Const, 0, ""},
+		{"SYS_SETTID", Const, 0, ""},
+		{"SYS_SETTID_WITH_PID", Const, 0, ""},
+		{"SYS_SETTIMEOFDAY", Const, 0, ""},
+		{"SYS_SETUID", Const, 0, ""},
+		{"SYS_SETUID32", Const, 0, ""},
+		{"SYS_SETWGROUPS", Const, 0, ""},
+		{"SYS_SETXATTR", Const, 0, ""},
+		{"SYS_SET_MEMPOLICY", Const, 0, ""},
+		{"SYS_SET_ROBUST_LIST", Const, 0, ""},
+		{"SYS_SET_THREAD_AREA", Const, 0, ""},
+		{"SYS_SET_TID_ADDRESS", Const, 0, ""},
+		{"SYS_SGETMASK", Const, 0, ""},
+		{"SYS_SHARED_REGION_CHECK_NP", Const, 0, ""},
+		{"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0, ""},
+		{"SYS_SHMAT", Const, 0, ""},
+		{"SYS_SHMCTL", Const, 0, ""},
+		{"SYS_SHMDT", Const, 0, ""},
+		{"SYS_SHMGET", Const, 0, ""},
+		{"SYS_SHMSYS", Const, 0, ""},
+		{"SYS_SHM_OPEN", Const, 0, ""},
+		{"SYS_SHM_UNLINK", Const, 0, ""},
+		{"SYS_SHUTDOWN", Const, 0, ""},
+		{"SYS_SIGACTION", Const, 0, ""},
+		{"SYS_SIGALTSTACK", Const, 0, ""},
+		{"SYS_SIGNAL", Const, 0, ""},
+		{"SYS_SIGNALFD", Const, 0, ""},
+		{"SYS_SIGNALFD4", Const, 0, ""},
+		{"SYS_SIGPENDING", Const, 0, ""},
+		{"SYS_SIGPROCMASK", Const, 0, ""},
+		{"SYS_SIGQUEUE", Const, 0, ""},
+		{"SYS_SIGQUEUEINFO", Const, 1, ""},
+		{"SYS_SIGRETURN", Const, 0, ""},
+		{"SYS_SIGSUSPEND", Const, 0, ""},
+		{"SYS_SIGSUSPEND_NOCANCEL", Const, 0, ""},
+		{"SYS_SIGTIMEDWAIT", Const, 0, ""},
+		{"SYS_SIGWAIT", Const, 0, ""},
+		{"SYS_SIGWAITINFO", Const, 0, ""},
+		{"SYS_SOCKET", Const, 0, ""},
+		{"SYS_SOCKETCALL", Const, 0, ""},
+		{"SYS_SOCKETPAIR", Const, 0, ""},
+		{"SYS_SPLICE", Const, 0, ""},
+		{"SYS_SSETMASK", Const, 0, ""},
+		{"SYS_SSTK", Const, 0, ""},
+		{"SYS_STACK_SNAPSHOT", Const, 0, ""},
+		{"SYS_STAT", Const, 0, ""},
+		{"SYS_STAT64", Const, 0, ""},
+		{"SYS_STAT64_EXTENDED", Const, 0, ""},
+		{"SYS_STATFS", Const, 0, ""},
+		{"SYS_STATFS64", Const, 0, ""},
+		{"SYS_STATV", Const, 0, ""},
+		{"SYS_STATVFS1", Const, 1, ""},
+		{"SYS_STAT_EXTENDED", Const, 0, ""},
+		{"SYS_STIME", Const, 0, ""},
+		{"SYS_STTY", Const, 0, ""},
+		{"SYS_SWAPCONTEXT", Const, 0, ""},
+		{"SYS_SWAPCTL", Const, 1, ""},
+		{"SYS_SWAPOFF", Const, 0, ""},
+		{"SYS_SWAPON", Const, 0, ""},
+		{"SYS_SYMLINK", Const, 0, ""},
+		{"SYS_SYMLINKAT", Const, 0, ""},
+		{"SYS_SYNC", Const, 0, ""},
+		{"SYS_SYNCFS", Const, 0, ""},
+		{"SYS_SYNC_FILE_RANGE", Const, 0, ""},
+		{"SYS_SYSARCH", Const, 0, ""},
+		{"SYS_SYSCALL", Const, 0, ""},
+		{"SYS_SYSCALL_BASE", Const, 0, ""},
+		{"SYS_SYSFS", Const, 0, ""},
+		{"SYS_SYSINFO", Const, 0, ""},
+		{"SYS_SYSLOG", Const, 0, ""},
+		{"SYS_TEE", Const, 0, ""},
+		{"SYS_TGKILL", Const, 0, ""},
+		{"SYS_THREAD_SELFID", Const, 0, ""},
+		{"SYS_THR_CREATE", Const, 0, ""},
+		{"SYS_THR_EXIT", Const, 0, ""},
+		{"SYS_THR_KILL", Const, 0, ""},
+		{"SYS_THR_KILL2", Const, 0, ""},
+		{"SYS_THR_NEW", Const, 0, ""},
+		{"SYS_THR_SELF", Const, 0, ""},
+		{"SYS_THR_SET_NAME", Const, 0, ""},
+		{"SYS_THR_SUSPEND", Const, 0, ""},
+		{"SYS_THR_WAKE", Const, 0, ""},
+		{"SYS_TIME", Const, 0, ""},
+		{"SYS_TIMERFD_CREATE", Const, 0, ""},
+		{"SYS_TIMERFD_GETTIME", Const, 0, ""},
+		{"SYS_TIMERFD_SETTIME", Const, 0, ""},
+		{"SYS_TIMER_CREATE", Const, 0, ""},
+		{"SYS_TIMER_DELETE", Const, 0, ""},
+		{"SYS_TIMER_GETOVERRUN", Const, 0, ""},
+		{"SYS_TIMER_GETTIME", Const, 0, ""},
+		{"SYS_TIMER_SETTIME", Const, 0, ""},
+		{"SYS_TIMES", Const, 0, ""},
+		{"SYS_TKILL", Const, 0, ""},
+		{"SYS_TRUNCATE", Const, 0, ""},
+		{"SYS_TRUNCATE64", Const, 0, ""},
+		{"SYS_TUXCALL", Const, 0, ""},
+		{"SYS_UGETRLIMIT", Const, 0, ""},
+		{"SYS_ULIMIT", Const, 0, ""},
+		{"SYS_UMASK", Const, 0, ""},
+		{"SYS_UMASK_EXTENDED", Const, 0, ""},
+		{"SYS_UMOUNT", Const, 0, ""},
+		{"SYS_UMOUNT2", Const, 0, ""},
+		{"SYS_UNAME", Const, 0, ""},
+		{"SYS_UNDELETE", Const, 0, ""},
+		{"SYS_UNLINK", Const, 0, ""},
+		{"SYS_UNLINKAT", Const, 0, ""},
+		{"SYS_UNMOUNT", Const, 0, ""},
+		{"SYS_UNSHARE", Const, 0, ""},
+		{"SYS_USELIB", Const, 0, ""},
+		{"SYS_USTAT", Const, 0, ""},
+		{"SYS_UTIME", Const, 0, ""},
+		{"SYS_UTIMENSAT", Const, 0, ""},
+		{"SYS_UTIMES", Const, 0, ""},
+		{"SYS_UTRACE", Const, 0, ""},
+		{"SYS_UUIDGEN", Const, 0, ""},
+		{"SYS_VADVISE", Const, 1, ""},
+		{"SYS_VFORK", Const, 0, ""},
+		{"SYS_VHANGUP", Const, 0, ""},
+		{"SYS_VM86", Const, 0, ""},
+		{"SYS_VM86OLD", Const, 0, ""},
+		{"SYS_VMSPLICE", Const, 0, ""},
+		{"SYS_VM_PRESSURE_MONITOR", Const, 0, ""},
+		{"SYS_VSERVER", Const, 0, ""},
+		{"SYS_WAIT4", Const, 0, ""},
+		{"SYS_WAIT4_NOCANCEL", Const, 0, ""},
+		{"SYS_WAIT6", Const, 1, ""},
+		{"SYS_WAITEVENT", Const, 0, ""},
+		{"SYS_WAITID", Const, 0, ""},
+		{"SYS_WAITID_NOCANCEL", Const, 0, ""},
+		{"SYS_WAITPID", Const, 0, ""},
+		{"SYS_WATCHEVENT", Const, 0, ""},
+		{"SYS_WORKQ_KERNRETURN", Const, 0, ""},
+		{"SYS_WORKQ_OPEN", Const, 0, ""},
+		{"SYS_WRITE", Const, 0, ""},
+		{"SYS_WRITEV", Const, 0, ""},
+		{"SYS_WRITEV_NOCANCEL", Const, 0, ""},
+		{"SYS_WRITE_NOCANCEL", Const, 0, ""},
+		{"SYS_YIELD", Const, 0, ""},
+		{"SYS__LLSEEK", Const, 0, ""},
+		{"SYS__LWP_CONTINUE", Const, 1, ""},
+		{"SYS__LWP_CREATE", Const, 1, ""},
+		{"SYS__LWP_CTL", Const, 1, ""},
+		{"SYS__LWP_DETACH", Const, 1, ""},
+		{"SYS__LWP_EXIT", Const, 1, ""},
+		{"SYS__LWP_GETNAME", Const, 1, ""},
+		{"SYS__LWP_GETPRIVATE", Const, 1, ""},
+		{"SYS__LWP_KILL", Const, 1, ""},
+		{"SYS__LWP_PARK", Const, 1, ""},
+		{"SYS__LWP_SELF", Const, 1, ""},
+		{"SYS__LWP_SETNAME", Const, 1, ""},
+		{"SYS__LWP_SETPRIVATE", Const, 1, ""},
+		{"SYS__LWP_SUSPEND", Const, 1, ""},
+		{"SYS__LWP_UNPARK", Const, 1, ""},
+		{"SYS__LWP_UNPARK_ALL", Const, 1, ""},
+		{"SYS__LWP_WAIT", Const, 1, ""},
+		{"SYS__LWP_WAKEUP", Const, 1, ""},
+		{"SYS__NEWSELECT", Const, 0, ""},
+		{"SYS__PSET_BIND", Const, 1, ""},
+		{"SYS__SCHED_GETAFFINITY", Const, 1, ""},
+		{"SYS__SCHED_GETPARAM", Const, 1, ""},
+		{"SYS__SCHED_SETAFFINITY", Const, 1, ""},
+		{"SYS__SCHED_SETPARAM", Const, 1, ""},
+		{"SYS__SYSCTL", Const, 0, ""},
+		{"SYS__UMTX_LOCK", Const, 0, ""},
+		{"SYS__UMTX_OP", Const, 0, ""},
+		{"SYS__UMTX_UNLOCK", Const, 0, ""},
+		{"SYS___ACL_ACLCHECK_FD", Const, 0, ""},
+		{"SYS___ACL_ACLCHECK_FILE", Const, 0, ""},
+		{"SYS___ACL_ACLCHECK_LINK", Const, 0, ""},
+		{"SYS___ACL_DELETE_FD", Const, 0, ""},
+		{"SYS___ACL_DELETE_FILE", Const, 0, ""},
+		{"SYS___ACL_DELETE_LINK", Const, 0, ""},
+		{"SYS___ACL_GET_FD", Const, 0, ""},
+		{"SYS___ACL_GET_FILE", Const, 0, ""},
+		{"SYS___ACL_GET_LINK", Const, 0, ""},
+		{"SYS___ACL_SET_FD", Const, 0, ""},
+		{"SYS___ACL_SET_FILE", Const, 0, ""},
+		{"SYS___ACL_SET_LINK", Const, 0, ""},
+		{"SYS___CAP_RIGHTS_GET", Const, 14, ""},
+		{"SYS___CLONE", Const, 1, ""},
+		{"SYS___DISABLE_THREADSIGNAL", Const, 0, ""},
+		{"SYS___GETCWD", Const, 0, ""},
+		{"SYS___GETLOGIN", Const, 1, ""},
+		{"SYS___GET_TCB", Const, 1, ""},
+		{"SYS___MAC_EXECVE", Const, 0, ""},
+		{"SYS___MAC_GETFSSTAT", Const, 0, ""},
+		{"SYS___MAC_GET_FD", Const, 0, ""},
+		{"SYS___MAC_GET_FILE", Const, 0, ""},
+		{"SYS___MAC_GET_LCID", Const, 0, ""},
+		{"SYS___MAC_GET_LCTX", Const, 0, ""},
+		{"SYS___MAC_GET_LINK", Const, 0, ""},
+		{"SYS___MAC_GET_MOUNT", Const, 0, ""},
+		{"SYS___MAC_GET_PID", Const, 0, ""},
+		{"SYS___MAC_GET_PROC", Const, 0, ""},
+		{"SYS___MAC_MOUNT", Const, 0, ""},
+		{"SYS___MAC_SET_FD", Const, 0, ""},
+		{"SYS___MAC_SET_FILE", Const, 0, ""},
+		{"SYS___MAC_SET_LCTX", Const, 0, ""},
+		{"SYS___MAC_SET_LINK", Const, 0, ""},
+		{"SYS___MAC_SET_PROC", Const, 0, ""},
+		{"SYS___MAC_SYSCALL", Const, 0, ""},
+		{"SYS___OLD_SEMWAIT_SIGNAL", Const, 0, ""},
+		{"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
+		{"SYS___POSIX_CHOWN", Const, 1, ""},
+		{"SYS___POSIX_FCHOWN", Const, 1, ""},
+		{"SYS___POSIX_LCHOWN", Const, 1, ""},
+		{"SYS___POSIX_RENAME", Const, 1, ""},
+		{"SYS___PTHREAD_CANCELED", Const, 0, ""},
+		{"SYS___PTHREAD_CHDIR", Const, 0, ""},
+		{"SYS___PTHREAD_FCHDIR", Const, 0, ""},
+		{"SYS___PTHREAD_KILL", Const, 0, ""},
+		{"SYS___PTHREAD_MARKCANCEL", Const, 0, ""},
+		{"SYS___PTHREAD_SIGMASK", Const, 0, ""},
+		{"SYS___QUOTACTL", Const, 1, ""},
+		{"SYS___SEMCTL", Const, 1, ""},
+		{"SYS___SEMWAIT_SIGNAL", Const, 0, ""},
+		{"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
+		{"SYS___SETLOGIN", Const, 1, ""},
+		{"SYS___SETUGID", Const, 0, ""},
+		{"SYS___SET_TCB", Const, 1, ""},
+		{"SYS___SIGACTION_SIGTRAMP", Const, 1, ""},
+		{"SYS___SIGTIMEDWAIT", Const, 1, ""},
+		{"SYS___SIGWAIT", Const, 0, ""},
+		{"SYS___SIGWAIT_NOCANCEL", Const, 0, ""},
+		{"SYS___SYSCTL", Const, 0, ""},
+		{"SYS___TFORK", Const, 1, ""},
+		{"SYS___THREXIT", Const, 1, ""},
+		{"SYS___THRSIGDIVERT", Const, 1, ""},
+		{"SYS___THRSLEEP", Const, 1, ""},
+		{"SYS___THRWAKEUP", Const, 1, ""},
+		{"S_ARCH1", Const, 1, ""},
+		{"S_ARCH2", Const, 1, ""},
+		{"S_BLKSIZE", Const, 0, ""},
+		{"S_IEXEC", Const, 0, ""},
+		{"S_IFBLK", Const, 0, ""},
+		{"S_IFCHR", Const, 0, ""},
+		{"S_IFDIR", Const, 0, ""},
+		{"S_IFIFO", Const, 0, ""},
+		{"S_IFLNK", Const, 0, ""},
+		{"S_IFMT", Const, 0, ""},
+		{"S_IFREG", Const, 0, ""},
+		{"S_IFSOCK", Const, 0, ""},
+		{"S_IFWHT", Const, 0, ""},
+		{"S_IREAD", Const, 0, ""},
+		{"S_IRGRP", Const, 0, ""},
+		{"S_IROTH", Const, 0, ""},
+		{"S_IRUSR", Const, 0, ""},
+		{"S_IRWXG", Const, 0, ""},
+		{"S_IRWXO", Const, 0, ""},
+		{"S_IRWXU", Const, 0, ""},
+		{"S_ISGID", Const, 0, ""},
+		{"S_ISTXT", Const, 0, ""},
+		{"S_ISUID", Const, 0, ""},
+		{"S_ISVTX", Const, 0, ""},
+		{"S_IWGRP", Const, 0, ""},
+		{"S_IWOTH", Const, 0, ""},
+		{"S_IWRITE", Const, 0, ""},
+		{"S_IWUSR", Const, 0, ""},
+		{"S_IXGRP", Const, 0, ""},
+		{"S_IXOTH", Const, 0, ""},
+		{"S_IXUSR", Const, 0, ""},
+		{"S_LOGIN_SET", Const, 1, ""},
+		{"SecurityAttributes", Type, 0, ""},
+		{"SecurityAttributes.InheritHandle", Field, 0, ""},
+		{"SecurityAttributes.Length", Field, 0, ""},
+		{"SecurityAttributes.SecurityDescriptor", Field, 0, ""},
+		{"Seek", Func, 0, "func(fd int, offset int64, whence int) (off int64, err error)"},
+		{"Select", Func, 0, "func(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)"},
+		{"Sendfile", Func, 0, "func(outfd int, infd int, offset *int64, count int) (written int, err error)"},
+		{"Sendmsg", Func, 0, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (err error)"},
+		{"SendmsgN", Func, 3, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (n int, err error)"},
+		{"Sendto", Func, 0, "func(fd int, p []byte, flags int, to Sockaddr) (err error)"},
+		{"Servent", Type, 0, ""},
+		{"Servent.Aliases", Field, 0, ""},
+		{"Servent.Name", Field, 0, ""},
+		{"Servent.Port", Field, 0, ""},
+		{"Servent.Proto", Field, 0, ""},
+		{"SetBpf", Func, 0, ""},
+		{"SetBpfBuflen", Func, 0, ""},
+		{"SetBpfDatalink", Func, 0, ""},
+		{"SetBpfHeadercmpl", Func, 0, ""},
+		{"SetBpfImmediate", Func, 0, ""},
+		{"SetBpfInterface", Func, 0, ""},
+		{"SetBpfPromisc", Func, 0, ""},
+		{"SetBpfTimeout", Func, 0, ""},
+		{"SetCurrentDirectory", Func, 0, ""},
+		{"SetEndOfFile", Func, 0, ""},
+		{"SetEnvironmentVariable", Func, 0, ""},
+		{"SetFileAttributes", Func, 0, ""},
+		{"SetFileCompletionNotificationModes", Func, 2, ""},
+		{"SetFilePointer", Func, 0, ""},
+		{"SetFileTime", Func, 0, ""},
+		{"SetHandleInformation", Func, 0, ""},
+		{"SetKevent", Func, 0, ""},
+		{"SetLsfPromisc", Func, 0, "func(name string, m bool) error"},
+		{"SetNonblock", Func, 0, "func(fd int, nonblocking bool) (err error)"},
+		{"Setdomainname", Func, 0, "func(p []byte) (err error)"},
+		{"Setegid", Func, 0, "func(egid int) (err error)"},
+		{"Setenv", Func, 0, "func(key string, value string) error"},
+		{"Seteuid", Func, 0, "func(euid int) (err error)"},
+		{"Setfsgid", Func, 0, "func(gid int) (err error)"},
+		{"Setfsuid", Func, 0, "func(uid int) (err error)"},
+		{"Setgid", Func, 0, "func(gid int) (err error)"},
+		{"Setgroups", Func, 0, "func(gids []int) (err error)"},
+		{"Sethostname", Func, 0, "func(p []byte) (err error)"},
+		{"Setlogin", Func, 0, ""},
+		{"Setpgid", Func, 0, "func(pid int, pgid int) (err error)"},
+		{"Setpriority", Func, 0, "func(which int, who int, prio int) (err error)"},
+		{"Setprivexec", Func, 0, ""},
+		{"Setregid", Func, 0, "func(rgid int, egid int) (err error)"},
+		{"Setresgid", Func, 0, "func(rgid int, egid int, sgid int) (err error)"},
+		{"Setresuid", Func, 0, "func(ruid int, euid int, suid int) (err error)"},
+		{"Setreuid", Func, 0, "func(ruid int, euid int) (err error)"},
+		{"Setrlimit", Func, 0, "func(resource int, rlim *Rlimit) error"},
+		{"Setsid", Func, 0, "func() (pid int, err error)"},
+		{"Setsockopt", Func, 0, ""},
+		{"SetsockoptByte", Func, 0, "func(fd int, level int, opt int, value byte) (err error)"},
+		{"SetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int, filter *ICMPv6Filter) error"},
+		{"SetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int, mreq *IPMreq) (err error)"},
+		{"SetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int, mreq *IPMreqn) (err error)"},
+		{"SetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int, mreq *IPv6Mreq) (err error)"},
+		{"SetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int, value [4]byte) (err error)"},
+		{"SetsockoptInt", Func, 0, "func(fd int, level int, opt int, value int) (err error)"},
+		{"SetsockoptLinger", Func, 0, "func(fd int, level int, opt int, l *Linger) (err error)"},
+		{"SetsockoptString", Func, 0, "func(fd int, level int, opt int, s string) (err error)"},
+		{"SetsockoptTimeval", Func, 0, "func(fd int, level int, opt int, tv *Timeval) (err error)"},
+		{"Settimeofday", Func, 0, "func(tv *Timeval) (err error)"},
+		{"Setuid", Func, 0, "func(uid int) (err error)"},
+		{"Setxattr", Func, 1, "func(path string, attr string, data []byte, flags int) (err error)"},
+		{"Shutdown", Func, 0, "func(fd int, how int) (err error)"},
+		{"SidTypeAlias", Const, 0, ""},
+		{"SidTypeComputer", Const, 0, ""},
+		{"SidTypeDeletedAccount", Const, 0, ""},
+		{"SidTypeDomain", Const, 0, ""},
+		{"SidTypeGroup", Const, 0, ""},
+		{"SidTypeInvalid", Const, 0, ""},
+		{"SidTypeLabel", Const, 0, ""},
+		{"SidTypeUnknown", Const, 0, ""},
+		{"SidTypeUser", Const, 0, ""},
+		{"SidTypeWellKnownGroup", Const, 0, ""},
+		{"Signal", Type, 0, ""},
+		{"SizeofBpfHdr", Const, 0, ""},
+		{"SizeofBpfInsn", Const, 0, ""},
+		{"SizeofBpfProgram", Const, 0, ""},
+		{"SizeofBpfStat", Const, 0, ""},
+		{"SizeofBpfVersion", Const, 0, ""},
+		{"SizeofBpfZbuf", Const, 0, ""},
+		{"SizeofBpfZbufHeader", Const, 0, ""},
+		{"SizeofCmsghdr", Const, 0, ""},
+		{"SizeofICMPv6Filter", Const, 2, ""},
+		{"SizeofIPMreq", Const, 0, ""},
+		{"SizeofIPMreqn", Const, 0, ""},
+		{"SizeofIPv6MTUInfo", Const, 2, ""},
+		{"SizeofIPv6Mreq", Const, 0, ""},
+		{"SizeofIfAddrmsg", Const, 0, ""},
+		{"SizeofIfAnnounceMsghdr", Const, 1, ""},
+		{"SizeofIfData", Const, 0, ""},
+		{"SizeofIfInfomsg", Const, 0, ""},
+		{"SizeofIfMsghdr", Const, 0, ""},
+		{"SizeofIfaMsghdr", Const, 0, ""},
+		{"SizeofIfmaMsghdr", Const, 0, ""},
+		{"SizeofIfmaMsghdr2", Const, 0, ""},
+		{"SizeofInet4Pktinfo", Const, 0, ""},
+		{"SizeofInet6Pktinfo", Const, 0, ""},
+		{"SizeofInotifyEvent", Const, 0, ""},
+		{"SizeofLinger", Const, 0, ""},
+		{"SizeofMsghdr", Const, 0, ""},
+		{"SizeofNlAttr", Const, 0, ""},
+		{"SizeofNlMsgerr", Const, 0, ""},
+		{"SizeofNlMsghdr", Const, 0, ""},
+		{"SizeofRtAttr", Const, 0, ""},
+		{"SizeofRtGenmsg", Const, 0, ""},
+		{"SizeofRtMetrics", Const, 0, ""},
+		{"SizeofRtMsg", Const, 0, ""},
+		{"SizeofRtMsghdr", Const, 0, ""},
+		{"SizeofRtNexthop", Const, 0, ""},
+		{"SizeofSockFilter", Const, 0, ""},
+		{"SizeofSockFprog", Const, 0, ""},
+		{"SizeofSockaddrAny", Const, 0, ""},
+		{"SizeofSockaddrDatalink", Const, 0, ""},
+		{"SizeofSockaddrInet4", Const, 0, ""},
+		{"SizeofSockaddrInet6", Const, 0, ""},
+		{"SizeofSockaddrLinklayer", Const, 0, ""},
+		{"SizeofSockaddrNetlink", Const, 0, ""},
+		{"SizeofSockaddrUnix", Const, 0, ""},
+		{"SizeofTCPInfo", Const, 1, ""},
+		{"SizeofUcred", Const, 0, ""},
+		{"SlicePtrFromStrings", Func, 1, "func(ss []string) ([]*byte, error)"},
+		{"SockFilter", Type, 0, ""},
+		{"SockFilter.Code", Field, 0, ""},
+		{"SockFilter.Jf", Field, 0, ""},
+		{"SockFilter.Jt", Field, 0, ""},
+		{"SockFilter.K", Field, 0, ""},
+		{"SockFprog", Type, 0, ""},
+		{"SockFprog.Filter", Field, 0, ""},
+		{"SockFprog.Len", Field, 0, ""},
+		{"SockFprog.Pad_cgo_0", Field, 0, ""},
+		{"Sockaddr", Type, 0, ""},
+		{"SockaddrDatalink", Type, 0, ""},
+		{"SockaddrDatalink.Alen", Field, 0, ""},
+		{"SockaddrDatalink.Data", Field, 0, ""},
+		{"SockaddrDatalink.Family", Field, 0, ""},
+		{"SockaddrDatalink.Index", Field, 0, ""},
+		{"SockaddrDatalink.Len", Field, 0, ""},
+		{"SockaddrDatalink.Nlen", Field, 0, ""},
+		{"SockaddrDatalink.Slen", Field, 0, ""},
+		{"SockaddrDatalink.Type", Field, 0, ""},
+		{"SockaddrGen", Type, 0, ""},
+		{"SockaddrInet4", Type, 0, ""},
+		{"SockaddrInet4.Addr", Field, 0, ""},
+		{"SockaddrInet4.Port", Field, 0, ""},
+		{"SockaddrInet6", Type, 0, ""},
+		{"SockaddrInet6.Addr", Field, 0, ""},
+		{"SockaddrInet6.Port", Field, 0, ""},
+		{"SockaddrInet6.ZoneId", Field, 0, ""},
+		{"SockaddrLinklayer", Type, 0, ""},
+		{"SockaddrLinklayer.Addr", Field, 0, ""},
+		{"SockaddrLinklayer.Halen", Field, 0, ""},
+		{"SockaddrLinklayer.Hatype", Field, 0, ""},
+		{"SockaddrLinklayer.Ifindex", Field, 0, ""},
+		{"SockaddrLinklayer.Pkttype", Field, 0, ""},
+		{"SockaddrLinklayer.Protocol", Field, 0, ""},
+		{"SockaddrNetlink", Type, 0, ""},
+		{"SockaddrNetlink.Family", Field, 0, ""},
+		{"SockaddrNetlink.Groups", Field, 0, ""},
+		{"SockaddrNetlink.Pad", Field, 0, ""},
+		{"SockaddrNetlink.Pid", Field, 0, ""},
+		{"SockaddrUnix", Type, 0, ""},
+		{"SockaddrUnix.Name", Field, 0, ""},
+		{"Socket", Func, 0, "func(domain int, typ int, proto int) (fd int, err error)"},
+		{"SocketControlMessage", Type, 0, ""},
+		{"SocketControlMessage.Data", Field, 0, ""},
+		{"SocketControlMessage.Header", Field, 0, ""},
+		{"SocketDisableIPv6", Var, 0, ""},
+		{"Socketpair", Func, 0, "func(domain int, typ int, proto int) (fd [2]int, err error)"},
+		{"Splice", Func, 0, "func(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)"},
+		{"StartProcess", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error)"},
+		{"StartupInfo", Type, 0, ""},
+		{"StartupInfo.Cb", Field, 0, ""},
+		{"StartupInfo.Desktop", Field, 0, ""},
+		{"StartupInfo.FillAttribute", Field, 0, ""},
+		{"StartupInfo.Flags", Field, 0, ""},
+		{"StartupInfo.ShowWindow", Field, 0, ""},
+		{"StartupInfo.StdErr", Field, 0, ""},
+		{"StartupInfo.StdInput", Field, 0, ""},
+		{"StartupInfo.StdOutput", Field, 0, ""},
+		{"StartupInfo.Title", Field, 0, ""},
+		{"StartupInfo.X", Field, 0, ""},
+		{"StartupInfo.XCountChars", Field, 0, ""},
+		{"StartupInfo.XSize", Field, 0, ""},
+		{"StartupInfo.Y", Field, 0, ""},
+		{"StartupInfo.YCountChars", Field, 0, ""},
+		{"StartupInfo.YSize", Field, 0, ""},
+		{"Stat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
+		{"Stat_t", Type, 0, ""},
+		{"Stat_t.Atim", Field, 0, ""},
+		{"Stat_t.Atim_ext", Field, 12, ""},
+		{"Stat_t.Atimespec", Field, 0, ""},
+		{"Stat_t.Birthtimespec", Field, 0, ""},
+		{"Stat_t.Blksize", Field, 0, ""},
+		{"Stat_t.Blocks", Field, 0, ""},
+		{"Stat_t.Btim_ext", Field, 12, ""},
+		{"Stat_t.Ctim", Field, 0, ""},
+		{"Stat_t.Ctim_ext", Field, 12, ""},
+		{"Stat_t.Ctimespec", Field, 0, ""},
+		{"Stat_t.Dev", Field, 0, ""},
+		{"Stat_t.Flags", Field, 0, ""},
+		{"Stat_t.Gen", Field, 0, ""},
+		{"Stat_t.Gid", Field, 0, ""},
+		{"Stat_t.Ino", Field, 0, ""},
+		{"Stat_t.Lspare", Field, 0, ""},
+		{"Stat_t.Lspare0", Field, 2, ""},
+		{"Stat_t.Lspare1", Field, 2, ""},
+		{"Stat_t.Mode", Field, 0, ""},
+		{"Stat_t.Mtim", Field, 0, ""},
+		{"Stat_t.Mtim_ext", Field, 12, ""},
+		{"Stat_t.Mtimespec", Field, 0, ""},
+		{"Stat_t.Nlink", Field, 0, ""},
+		{"Stat_t.Pad_cgo_0", Field, 0, ""},
+		{"Stat_t.Pad_cgo_1", Field, 0, ""},
+		{"Stat_t.Pad_cgo_2", Field, 0, ""},
+		{"Stat_t.Padding0", Field, 12, ""},
+		{"Stat_t.Padding1", Field, 12, ""},
+		{"Stat_t.Qspare", Field, 0, ""},
+		{"Stat_t.Rdev", Field, 0, ""},
+		{"Stat_t.Size", Field, 0, ""},
+		{"Stat_t.Spare", Field, 2, ""},
+		{"Stat_t.Uid", Field, 0, ""},
+		{"Stat_t.X__pad0", Field, 0, ""},
+		{"Stat_t.X__pad1", Field, 0, ""},
+		{"Stat_t.X__pad2", Field, 0, ""},
+		{"Stat_t.X__st_birthtim", Field, 2, ""},
+		{"Stat_t.X__st_ino", Field, 0, ""},
+		{"Stat_t.X__unused", Field, 0, ""},
+		{"Statfs", Func, 0, "func(path string, buf *Statfs_t) (err error)"},
+		{"Statfs_t", Type, 0, ""},
+		{"Statfs_t.Asyncreads", Field, 0, ""},
+		{"Statfs_t.Asyncwrites", Field, 0, ""},
+		{"Statfs_t.Bavail", Field, 0, ""},
+		{"Statfs_t.Bfree", Field, 0, ""},
+		{"Statfs_t.Blocks", Field, 0, ""},
+		{"Statfs_t.Bsize", Field, 0, ""},
+		{"Statfs_t.Charspare", Field, 0, ""},
+		{"Statfs_t.F_asyncreads", Field, 2, ""},
+		{"Statfs_t.F_asyncwrites", Field, 2, ""},
+		{"Statfs_t.F_bavail", Field, 2, ""},
+		{"Statfs_t.F_bfree", Field, 2, ""},
+		{"Statfs_t.F_blocks", Field, 2, ""},
+		{"Statfs_t.F_bsize", Field, 2, ""},
+		{"Statfs_t.F_ctime", Field, 2, ""},
+		{"Statfs_t.F_favail", Field, 2, ""},
+		{"Statfs_t.F_ffree", Field, 2, ""},
+		{"Statfs_t.F_files", Field, 2, ""},
+		{"Statfs_t.F_flags", Field, 2, ""},
+		{"Statfs_t.F_fsid", Field, 2, ""},
+		{"Statfs_t.F_fstypename", Field, 2, ""},
+		{"Statfs_t.F_iosize", Field, 2, ""},
+		{"Statfs_t.F_mntfromname", Field, 2, ""},
+		{"Statfs_t.F_mntfromspec", Field, 3, ""},
+		{"Statfs_t.F_mntonname", Field, 2, ""},
+		{"Statfs_t.F_namemax", Field, 2, ""},
+		{"Statfs_t.F_owner", Field, 2, ""},
+		{"Statfs_t.F_spare", Field, 2, ""},
+		{"Statfs_t.F_syncreads", Field, 2, ""},
+		{"Statfs_t.F_syncwrites", Field, 2, ""},
+		{"Statfs_t.Ffree", Field, 0, ""},
+		{"Statfs_t.Files", Field, 0, ""},
+		{"Statfs_t.Flags", Field, 0, ""},
+		{"Statfs_t.Frsize", Field, 0, ""},
+		{"Statfs_t.Fsid", Field, 0, ""},
+		{"Statfs_t.Fssubtype", Field, 0, ""},
+		{"Statfs_t.Fstypename", Field, 0, ""},
+		{"Statfs_t.Iosize", Field, 0, ""},
+		{"Statfs_t.Mntfromname", Field, 0, ""},
+		{"Statfs_t.Mntonname", Field, 0, ""},
+		{"Statfs_t.Mount_info", Field, 2, ""},
+		{"Statfs_t.Namelen", Field, 0, ""},
+		{"Statfs_t.Namemax", Field, 0, ""},
+		{"Statfs_t.Owner", Field, 0, ""},
+		{"Statfs_t.Pad_cgo_0", Field, 0, ""},
+		{"Statfs_t.Pad_cgo_1", Field, 2, ""},
+		{"Statfs_t.Reserved", Field, 0, ""},
+		{"Statfs_t.Spare", Field, 0, ""},
+		{"Statfs_t.Syncreads", Field, 0, ""},
+		{"Statfs_t.Syncwrites", Field, 0, ""},
+		{"Statfs_t.Type", Field, 0, ""},
+		{"Statfs_t.Version", Field, 0, ""},
+		{"Stderr", Var, 0, ""},
+		{"Stdin", Var, 0, ""},
+		{"Stdout", Var, 0, ""},
+		{"StringBytePtr", Func, 0, "func(s string) *byte"},
+		{"StringByteSlice", Func, 0, "func(s string) []byte"},
+		{"StringSlicePtr", Func, 0, "func(ss []string) []*byte"},
+		{"StringToSid", Func, 0, ""},
+		{"StringToUTF16", Func, 0, ""},
+		{"StringToUTF16Ptr", Func, 0, ""},
+		{"Symlink", Func, 0, "func(oldpath string, newpath string) (err error)"},
+		{"Sync", Func, 0, "func()"},
+		{"SyncFileRange", Func, 0, "func(fd int, off int64, n int64, flags int) (err error)"},
+		{"SysProcAttr", Type, 0, ""},
+		{"SysProcAttr.AdditionalInheritedHandles", Field, 17, ""},
+		{"SysProcAttr.AmbientCaps", Field, 9, ""},
+		{"SysProcAttr.CgroupFD", Field, 20, ""},
+		{"SysProcAttr.Chroot", Field, 0, ""},
+		{"SysProcAttr.Cloneflags", Field, 2, ""},
+		{"SysProcAttr.CmdLine", Field, 0, ""},
+		{"SysProcAttr.CreationFlags", Field, 1, ""},
+		{"SysProcAttr.Credential", Field, 0, ""},
+		{"SysProcAttr.Ctty", Field, 1, ""},
+		{"SysProcAttr.Foreground", Field, 5, ""},
+		{"SysProcAttr.GidMappings", Field, 4, ""},
+		{"SysProcAttr.GidMappingsEnableSetgroups", Field, 5, ""},
+		{"SysProcAttr.HideWindow", Field, 0, ""},
+		{"SysProcAttr.Jail", Field, 21, ""},
+		{"SysProcAttr.NoInheritHandles", Field, 16, ""},
+		{"SysProcAttr.Noctty", Field, 0, ""},
+		{"SysProcAttr.ParentProcess", Field, 17, ""},
+		{"SysProcAttr.Pdeathsig", Field, 0, ""},
+		{"SysProcAttr.Pgid", Field, 5, ""},
+		{"SysProcAttr.PidFD", Field, 22, ""},
+		{"SysProcAttr.ProcessAttributes", Field, 13, ""},
+		{"SysProcAttr.Ptrace", Field, 0, ""},
+		{"SysProcAttr.Setctty", Field, 0, ""},
+		{"SysProcAttr.Setpgid", Field, 0, ""},
+		{"SysProcAttr.Setsid", Field, 0, ""},
+		{"SysProcAttr.ThreadAttributes", Field, 13, ""},
+		{"SysProcAttr.Token", Field, 10, ""},
+		{"SysProcAttr.UidMappings", Field, 4, ""},
+		{"SysProcAttr.Unshareflags", Field, 7, ""},
+		{"SysProcAttr.UseCgroupFD", Field, 20, ""},
+		{"SysProcIDMap", Type, 4, ""},
+		{"SysProcIDMap.ContainerID", Field, 4, ""},
+		{"SysProcIDMap.HostID", Field, 4, ""},
+		{"SysProcIDMap.Size", Field, 4, ""},
+		{"Syscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+		{"Syscall12", Func, 0, ""},
+		{"Syscall15", Func, 0, ""},
+		{"Syscall18", Func, 12, ""},
+		{"Syscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+		{"Syscall9", Func, 0, ""},
+		{"SyscallN", Func, 18, ""},
+		{"Sysctl", Func, 0, ""},
+		{"SysctlUint32", Func, 0, ""},
+		{"Sysctlnode", Type, 2, ""},
+		{"Sysctlnode.Flags", Field, 2, ""},
+		{"Sysctlnode.Name", Field, 2, ""},
+		{"Sysctlnode.Num", Field, 2, ""},
+		{"Sysctlnode.Un", Field, 2, ""},
+		{"Sysctlnode.Ver", Field, 2, ""},
+		{"Sysctlnode.X__rsvd", Field, 2, ""},
+		{"Sysctlnode.X_sysctl_desc", Field, 2, ""},
+		{"Sysctlnode.X_sysctl_func", Field, 2, ""},
+		{"Sysctlnode.X_sysctl_parent", Field, 2, ""},
+		{"Sysctlnode.X_sysctl_size", Field, 2, ""},
+		{"Sysinfo", Func, 0, "func(info *Sysinfo_t) (err error)"},
+		{"Sysinfo_t", Type, 0, ""},
+		{"Sysinfo_t.Bufferram", Field, 0, ""},
+		{"Sysinfo_t.Freehigh", Field, 0, ""},
+		{"Sysinfo_t.Freeram", Field, 0, ""},
+		{"Sysinfo_t.Freeswap", Field, 0, ""},
+		{"Sysinfo_t.Loads", Field, 0, ""},
+		{"Sysinfo_t.Pad", Field, 0, ""},
+		{"Sysinfo_t.Pad_cgo_0", Field, 0, ""},
+		{"Sysinfo_t.Pad_cgo_1", Field, 0, ""},
+		{"Sysinfo_t.Procs", Field, 0, ""},
+		{"Sysinfo_t.Sharedram", Field, 0, ""},
+		{"Sysinfo_t.Totalhigh", Field, 0, ""},
+		{"Sysinfo_t.Totalram", Field, 0, ""},
+		{"Sysinfo_t.Totalswap", Field, 0, ""},
+		{"Sysinfo_t.Unit", Field, 0, ""},
+		{"Sysinfo_t.Uptime", Field, 0, ""},
+		{"Sysinfo_t.X_f", Field, 0, ""},
+		{"Systemtime", Type, 0, ""},
+		{"Systemtime.Day", Field, 0, ""},
+		{"Systemtime.DayOfWeek", Field, 0, ""},
+		{"Systemtime.Hour", Field, 0, ""},
+		{"Systemtime.Milliseconds", Field, 0, ""},
+		{"Systemtime.Minute", Field, 0, ""},
+		{"Systemtime.Month", Field, 0, ""},
+		{"Systemtime.Second", Field, 0, ""},
+		{"Systemtime.Year", Field, 0, ""},
+		{"TCGETS", Const, 0, ""},
+		{"TCIFLUSH", Const, 1, ""},
+		{"TCIOFLUSH", Const, 1, ""},
+		{"TCOFLUSH", Const, 1, ""},
+		{"TCPInfo", Type, 1, ""},
+		{"TCPInfo.Advmss", Field, 1, ""},
+		{"TCPInfo.Ato", Field, 1, ""},
+		{"TCPInfo.Backoff", Field, 1, ""},
+		{"TCPInfo.Ca_state", Field, 1, ""},
+		{"TCPInfo.Fackets", Field, 1, ""},
+		{"TCPInfo.Last_ack_recv", Field, 1, ""},
+		{"TCPInfo.Last_ack_sent", Field, 1, ""},
+		{"TCPInfo.Last_data_recv", Field, 1, ""},
+		{"TCPInfo.Last_data_sent", Field, 1, ""},
+		{"TCPInfo.Lost", Field, 1, ""},
+		{"TCPInfo.Options", Field, 1, ""},
+		{"TCPInfo.Pad_cgo_0", Field, 1, ""},
+		{"TCPInfo.Pmtu", Field, 1, ""},
+		{"TCPInfo.Probes", Field, 1, ""},
+		{"TCPInfo.Rcv_mss", Field, 1, ""},
+		{"TCPInfo.Rcv_rtt", Field, 1, ""},
+		{"TCPInfo.Rcv_space", Field, 1, ""},
+		{"TCPInfo.Rcv_ssthresh", Field, 1, ""},
+		{"TCPInfo.Reordering", Field, 1, ""},
+		{"TCPInfo.Retrans", Field, 1, ""},
+		{"TCPInfo.Retransmits", Field, 1, ""},
+		{"TCPInfo.Rto", Field, 1, ""},
+		{"TCPInfo.Rtt", Field, 1, ""},
+		{"TCPInfo.Rttvar", Field, 1, ""},
+		{"TCPInfo.Sacked", Field, 1, ""},
+		{"TCPInfo.Snd_cwnd", Field, 1, ""},
+		{"TCPInfo.Snd_mss", Field, 1, ""},
+		{"TCPInfo.Snd_ssthresh", Field, 1, ""},
+		{"TCPInfo.State", Field, 1, ""},
+		{"TCPInfo.Total_retrans", Field, 1, ""},
+		{"TCPInfo.Unacked", Field, 1, ""},
+		{"TCPKeepalive", Type, 3, ""},
+		{"TCPKeepalive.Interval", Field, 3, ""},
+		{"TCPKeepalive.OnOff", Field, 3, ""},
+		{"TCPKeepalive.Time", Field, 3, ""},
+		{"TCP_CA_NAME_MAX", Const, 0, ""},
+		{"TCP_CONGCTL", Const, 1, ""},
+		{"TCP_CONGESTION", Const, 0, ""},
+		{"TCP_CONNECTIONTIMEOUT", Const, 0, ""},
+		{"TCP_CORK", Const, 0, ""},
+		{"TCP_DEFER_ACCEPT", Const, 0, ""},
+		{"TCP_ENABLE_ECN", Const, 16, ""},
+		{"TCP_INFO", Const, 0, ""},
+		{"TCP_KEEPALIVE", Const, 0, ""},
+		{"TCP_KEEPCNT", Const, 0, ""},
+		{"TCP_KEEPIDLE", Const, 0, ""},
+		{"TCP_KEEPINIT", Const, 1, ""},
+		{"TCP_KEEPINTVL", Const, 0, ""},
+		{"TCP_LINGER2", Const, 0, ""},
+		{"TCP_MAXBURST", Const, 0, ""},
+		{"TCP_MAXHLEN", Const, 0, ""},
+		{"TCP_MAXOLEN", Const, 0, ""},
+		{"TCP_MAXSEG", Const, 0, ""},
+		{"TCP_MAXWIN", Const, 0, ""},
+		{"TCP_MAX_SACK", Const, 0, ""},
+		{"TCP_MAX_WINSHIFT", Const, 0, ""},
+		{"TCP_MD5SIG", Const, 0, ""},
+		{"TCP_MD5SIG_MAXKEYLEN", Const, 0, ""},
+		{"TCP_MINMSS", Const, 0, ""},
+		{"TCP_MINMSSOVERLOAD", Const, 0, ""},
+		{"TCP_MSS", Const, 0, ""},
+		{"TCP_NODELAY", Const, 0, ""},
+		{"TCP_NOOPT", Const, 0, ""},
+		{"TCP_NOPUSH", Const, 0, ""},
+		{"TCP_NOTSENT_LOWAT", Const, 16, ""},
+		{"TCP_NSTATES", Const, 1, ""},
+		{"TCP_QUICKACK", Const, 0, ""},
+		{"TCP_RXT_CONNDROPTIME", Const, 0, ""},
+		{"TCP_RXT_FINDROP", Const, 0, ""},
+		{"TCP_SACK_ENABLE", Const, 1, ""},
+		{"TCP_SENDMOREACKS", Const, 16, ""},
+		{"TCP_SYNCNT", Const, 0, ""},
+		{"TCP_VENDOR", Const, 3, ""},
+		{"TCP_WINDOW_CLAMP", Const, 0, ""},
+		{"TCSAFLUSH", Const, 1, ""},
+		{"TCSETS", Const, 0, ""},
+		{"TF_DISCONNECT", Const, 0, ""},
+		{"TF_REUSE_SOCKET", Const, 0, ""},
+		{"TF_USE_DEFAULT_WORKER", Const, 0, ""},
+		{"TF_USE_KERNEL_APC", Const, 0, ""},
+		{"TF_USE_SYSTEM_THREAD", Const, 0, ""},
+		{"TF_WRITE_BEHIND", Const, 0, ""},
+		{"TH32CS_INHERIT", Const, 4, ""},
+		{"TH32CS_SNAPALL", Const, 4, ""},
+		{"TH32CS_SNAPHEAPLIST", Const, 4, ""},
+		{"TH32CS_SNAPMODULE", Const, 4, ""},
+		{"TH32CS_SNAPMODULE32", Const, 4, ""},
+		{"TH32CS_SNAPPROCESS", Const, 4, ""},
+		{"TH32CS_SNAPTHREAD", Const, 4, ""},
+		{"TIME_ZONE_ID_DAYLIGHT", Const, 0, ""},
+		{"TIME_ZONE_ID_STANDARD", Const, 0, ""},
+		{"TIME_ZONE_ID_UNKNOWN", Const, 0, ""},
+		{"TIOCCBRK", Const, 0, ""},
+		{"TIOCCDTR", Const, 0, ""},
+		{"TIOCCONS", Const, 0, ""},
+		{"TIOCDCDTIMESTAMP", Const, 0, ""},
+		{"TIOCDRAIN", Const, 0, ""},
+		{"TIOCDSIMICROCODE", Const, 0, ""},
+		{"TIOCEXCL", Const, 0, ""},
+		{"TIOCEXT", Const, 0, ""},
+		{"TIOCFLAG_CDTRCTS", Const, 1, ""},
+		{"TIOCFLAG_CLOCAL", Const, 1, ""},
+		{"TIOCFLAG_CRTSCTS", Const, 1, ""},
+		{"TIOCFLAG_MDMBUF", Const, 1, ""},
+		{"TIOCFLAG_PPS", Const, 1, ""},
+		{"TIOCFLAG_SOFTCAR", Const, 1, ""},
+		{"TIOCFLUSH", Const, 0, ""},
+		{"TIOCGDEV", Const, 0, ""},
+		{"TIOCGDRAINWAIT", Const, 0, ""},
+		{"TIOCGETA", Const, 0, ""},
+		{"TIOCGETD", Const, 0, ""},
+		{"TIOCGFLAGS", Const, 1, ""},
+		{"TIOCGICOUNT", Const, 0, ""},
+		{"TIOCGLCKTRMIOS", Const, 0, ""},
+		{"TIOCGLINED", Const, 1, ""},
+		{"TIOCGPGRP", Const, 0, ""},
+		{"TIOCGPTN", Const, 0, ""},
+		{"TIOCGQSIZE", Const, 1, ""},
+		{"TIOCGRANTPT", Const, 1, ""},
+		{"TIOCGRS485", Const, 0, ""},
+		{"TIOCGSERIAL", Const, 0, ""},
+		{"TIOCGSID", Const, 0, ""},
+		{"TIOCGSIZE", Const, 1, ""},
+		{"TIOCGSOFTCAR", Const, 0, ""},
+		{"TIOCGTSTAMP", Const, 1, ""},
+		{"TIOCGWINSZ", Const, 0, ""},
+		{"TIOCINQ", Const, 0, ""},
+		{"TIOCIXOFF", Const, 0, ""},
+		{"TIOCIXON", Const, 0, ""},
+		{"TIOCLINUX", Const, 0, ""},
+		{"TIOCMBIC", Const, 0, ""},
+		{"TIOCMBIS", Const, 0, ""},
+		{"TIOCMGDTRWAIT", Const, 0, ""},
+		{"TIOCMGET", Const, 0, ""},
+		{"TIOCMIWAIT", Const, 0, ""},
+		{"TIOCMODG", Const, 0, ""},
+		{"TIOCMODS", Const, 0, ""},
+		{"TIOCMSDTRWAIT", Const, 0, ""},
+		{"TIOCMSET", Const, 0, ""},
+		{"TIOCM_CAR", Const, 0, ""},
+		{"TIOCM_CD", Const, 0, ""},
+		{"TIOCM_CTS", Const, 0, ""},
+		{"TIOCM_DCD", Const, 0, ""},
+		{"TIOCM_DSR", Const, 0, ""},
+		{"TIOCM_DTR", Const, 0, ""},
+		{"TIOCM_LE", Const, 0, ""},
+		{"TIOCM_RI", Const, 0, ""},
+		{"TIOCM_RNG", Const, 0, ""},
+		{"TIOCM_RTS", Const, 0, ""},
+		{"TIOCM_SR", Const, 0, ""},
+		{"TIOCM_ST", Const, 0, ""},
+		{"TIOCNOTTY", Const, 0, ""},
+		{"TIOCNXCL", Const, 0, ""},
+		{"TIOCOUTQ", Const, 0, ""},
+		{"TIOCPKT", Const, 0, ""},
+		{"TIOCPKT_DATA", Const, 0, ""},
+		{"TIOCPKT_DOSTOP", Const, 0, ""},
+		{"TIOCPKT_FLUSHREAD", Const, 0, ""},
+		{"TIOCPKT_FLUSHWRITE", Const, 0, ""},
+		{"TIOCPKT_IOCTL", Const, 0, ""},
+		{"TIOCPKT_NOSTOP", Const, 0, ""},
+		{"TIOCPKT_START", Const, 0, ""},
+		{"TIOCPKT_STOP", Const, 0, ""},
+		{"TIOCPTMASTER", Const, 0, ""},
+		{"TIOCPTMGET", Const, 1, ""},
+		{"TIOCPTSNAME", Const, 1, ""},
+		{"TIOCPTYGNAME", Const, 0, ""},
+		{"TIOCPTYGRANT", Const, 0, ""},
+		{"TIOCPTYUNLK", Const, 0, ""},
+		{"TIOCRCVFRAME", Const, 1, ""},
+		{"TIOCREMOTE", Const, 0, ""},
+		{"TIOCSBRK", Const, 0, ""},
+		{"TIOCSCONS", Const, 0, ""},
+		{"TIOCSCTTY", Const, 0, ""},
+		{"TIOCSDRAINWAIT", Const, 0, ""},
+		{"TIOCSDTR", Const, 0, ""},
+		{"TIOCSERCONFIG", Const, 0, ""},
+		{"TIOCSERGETLSR", Const, 0, ""},
+		{"TIOCSERGETMULTI", Const, 0, ""},
+		{"TIOCSERGSTRUCT", Const, 0, ""},
+		{"TIOCSERGWILD", Const, 0, ""},
+		{"TIOCSERSETMULTI", Const, 0, ""},
+		{"TIOCSERSWILD", Const, 0, ""},
+		{"TIOCSER_TEMT", Const, 0, ""},
+		{"TIOCSETA", Const, 0, ""},
+		{"TIOCSETAF", Const, 0, ""},
+		{"TIOCSETAW", Const, 0, ""},
+		{"TIOCSETD", Const, 0, ""},
+		{"TIOCSFLAGS", Const, 1, ""},
+		{"TIOCSIG", Const, 0, ""},
+		{"TIOCSLCKTRMIOS", Const, 0, ""},
+		{"TIOCSLINED", Const, 1, ""},
+		{"TIOCSPGRP", Const, 0, ""},
+		{"TIOCSPTLCK", Const, 0, ""},
+		{"TIOCSQSIZE", Const, 1, ""},
+		{"TIOCSRS485", Const, 0, ""},
+		{"TIOCSSERIAL", Const, 0, ""},
+		{"TIOCSSIZE", Const, 1, ""},
+		{"TIOCSSOFTCAR", Const, 0, ""},
+		{"TIOCSTART", Const, 0, ""},
+		{"TIOCSTAT", Const, 0, ""},
+		{"TIOCSTI", Const, 0, ""},
+		{"TIOCSTOP", Const, 0, ""},
+		{"TIOCSTSTAMP", Const, 1, ""},
+		{"TIOCSWINSZ", Const, 0, ""},
+		{"TIOCTIMESTAMP", Const, 0, ""},
+		{"TIOCUCNTL", Const, 0, ""},
+		{"TIOCVHANGUP", Const, 0, ""},
+		{"TIOCXMTFRAME", Const, 1, ""},
+		{"TOKEN_ADJUST_DEFAULT", Const, 0, ""},
+		{"TOKEN_ADJUST_GROUPS", Const, 0, ""},
+		{"TOKEN_ADJUST_PRIVILEGES", Const, 0, ""},
+		{"TOKEN_ADJUST_SESSIONID", Const, 11, ""},
+		{"TOKEN_ALL_ACCESS", Const, 0, ""},
+		{"TOKEN_ASSIGN_PRIMARY", Const, 0, ""},
+		{"TOKEN_DUPLICATE", Const, 0, ""},
+		{"TOKEN_EXECUTE", Const, 0, ""},
+		{"TOKEN_IMPERSONATE", Const, 0, ""},
+		{"TOKEN_QUERY", Const, 0, ""},
+		{"TOKEN_QUERY_SOURCE", Const, 0, ""},
+		{"TOKEN_READ", Const, 0, ""},
+		{"TOKEN_WRITE", Const, 0, ""},
+		{"TOSTOP", Const, 0, ""},
+		{"TRUNCATE_EXISTING", Const, 0, ""},
+		{"TUNATTACHFILTER", Const, 0, ""},
+		{"TUNDETACHFILTER", Const, 0, ""},
+		{"TUNGETFEATURES", Const, 0, ""},
+		{"TUNGETIFF", Const, 0, ""},
+		{"TUNGETSNDBUF", Const, 0, ""},
+		{"TUNGETVNETHDRSZ", Const, 0, ""},
+		{"TUNSETDEBUG", Const, 0, ""},
+		{"TUNSETGROUP", Const, 0, ""},
+		{"TUNSETIFF", Const, 0, ""},
+		{"TUNSETLINK", Const, 0, ""},
+		{"TUNSETNOCSUM", Const, 0, ""},
+		{"TUNSETOFFLOAD", Const, 0, ""},
+		{"TUNSETOWNER", Const, 0, ""},
+		{"TUNSETPERSIST", Const, 0, ""},
+		{"TUNSETSNDBUF", Const, 0, ""},
+		{"TUNSETTXFILTER", Const, 0, ""},
+		{"TUNSETVNETHDRSZ", Const, 0, ""},
+		{"Tee", Func, 0, "func(rfd int, wfd int, len int, flags int) (n int64, err error)"},
+		{"TerminateProcess", Func, 0, ""},
+		{"Termios", Type, 0, ""},
+		{"Termios.Cc", Field, 0, ""},
+		{"Termios.Cflag", Field, 0, ""},
+		{"Termios.Iflag", Field, 0, ""},
+		{"Termios.Ispeed", Field, 0, ""},
+		{"Termios.Lflag", Field, 0, ""},
+		{"Termios.Line", Field, 0, ""},
+		{"Termios.Oflag", Field, 0, ""},
+		{"Termios.Ospeed", Field, 0, ""},
+		{"Termios.Pad_cgo_0", Field, 0, ""},
+		{"Tgkill", Func, 0, "func(tgid int, tid int, sig Signal) (err error)"},
+		{"Time", Func, 0, "func(t *Time_t) (tt Time_t, err error)"},
+		{"Time_t", Type, 0, ""},
+		{"Times", Func, 0, "func(tms *Tms) (ticks uintptr, err error)"},
+		{"Timespec", Type, 0, ""},
+		{"Timespec.Nsec", Field, 0, ""},
+		{"Timespec.Pad_cgo_0", Field, 2, ""},
+		{"Timespec.Sec", Field, 0, ""},
+		{"TimespecToNsec", Func, 0, "func(ts Timespec) int64"},
+		{"Timeval", Type, 0, ""},
+		{"Timeval.Pad_cgo_0", Field, 0, ""},
+		{"Timeval.Sec", Field, 0, ""},
+		{"Timeval.Usec", Field, 0, ""},
+		{"Timeval32", Type, 0, ""},
+		{"Timeval32.Sec", Field, 0, ""},
+		{"Timeval32.Usec", Field, 0, ""},
+		{"TimevalToNsec", Func, 0, "func(tv Timeval) int64"},
+		{"Timex", Type, 0, ""},
+		{"Timex.Calcnt", Field, 0, ""},
+		{"Timex.Constant", Field, 0, ""},
+		{"Timex.Errcnt", Field, 0, ""},
+		{"Timex.Esterror", Field, 0, ""},
+		{"Timex.Freq", Field, 0, ""},
+		{"Timex.Jitcnt", Field, 0, ""},
+		{"Timex.Jitter", Field, 0, ""},
+		{"Timex.Maxerror", Field, 0, ""},
+		{"Timex.Modes", Field, 0, ""},
+		{"Timex.Offset", Field, 0, ""},
+		{"Timex.Pad_cgo_0", Field, 0, ""},
+		{"Timex.Pad_cgo_1", Field, 0, ""},
+		{"Timex.Pad_cgo_2", Field, 0, ""},
+		{"Timex.Pad_cgo_3", Field, 0, ""},
+		{"Timex.Ppsfreq", Field, 0, ""},
+		{"Timex.Precision", Field, 0, ""},
+		{"Timex.Shift", Field, 0, ""},
+		{"Timex.Stabil", Field, 0, ""},
+		{"Timex.Status", Field, 0, ""},
+		{"Timex.Stbcnt", Field, 0, ""},
+		{"Timex.Tai", Field, 0, ""},
+		{"Timex.Tick", Field, 0, ""},
+		{"Timex.Time", Field, 0, ""},
+		{"Timex.Tolerance", Field, 0, ""},
+		{"Timezoneinformation", Type, 0, ""},
+		{"Timezoneinformation.Bias", Field, 0, ""},
+		{"Timezoneinformation.DaylightBias", Field, 0, ""},
+		{"Timezoneinformation.DaylightDate", Field, 0, ""},
+		{"Timezoneinformation.DaylightName", Field, 0, ""},
+		{"Timezoneinformation.StandardBias", Field, 0, ""},
+		{"Timezoneinformation.StandardDate", Field, 0, ""},
+		{"Timezoneinformation.StandardName", Field, 0, ""},
+		{"Tms", Type, 0, ""},
+		{"Tms.Cstime", Field, 0, ""},
+		{"Tms.Cutime", Field, 0, ""},
+		{"Tms.Stime", Field, 0, ""},
+		{"Tms.Utime", Field, 0, ""},
+		{"Token", Type, 0, ""},
+		{"TokenAccessInformation", Const, 0, ""},
+		{"TokenAuditPolicy", Const, 0, ""},
+		{"TokenDefaultDacl", Const, 0, ""},
+		{"TokenElevation", Const, 0, ""},
+		{"TokenElevationType", Const, 0, ""},
+		{"TokenGroups", Const, 0, ""},
+		{"TokenGroupsAndPrivileges", Const, 0, ""},
+		{"TokenHasRestrictions", Const, 0, ""},
+		{"TokenImpersonationLevel", Const, 0, ""},
+		{"TokenIntegrityLevel", Const, 0, ""},
+		{"TokenLinkedToken", Const, 0, ""},
+		{"TokenLogonSid", Const, 0, ""},
+		{"TokenMandatoryPolicy", Const, 0, ""},
+		{"TokenOrigin", Const, 0, ""},
+		{"TokenOwner", Const, 0, ""},
+		{"TokenPrimaryGroup", Const, 0, ""},
+		{"TokenPrivileges", Const, 0, ""},
+		{"TokenRestrictedSids", Const, 0, ""},
+		{"TokenSandBoxInert", Const, 0, ""},
+		{"TokenSessionId", Const, 0, ""},
+		{"TokenSessionReference", Const, 0, ""},
+		{"TokenSource", Const, 0, ""},
+		{"TokenStatistics", Const, 0, ""},
+		{"TokenType", Const, 0, ""},
+		{"TokenUIAccess", Const, 0, ""},
+		{"TokenUser", Const, 0, ""},
+		{"TokenVirtualizationAllowed", Const, 0, ""},
+		{"TokenVirtualizationEnabled", Const, 0, ""},
+		{"Tokenprimarygroup", Type, 0, ""},
+		{"Tokenprimarygroup.PrimaryGroup", Field, 0, ""},
+		{"Tokenuser", Type, 0, ""},
+		{"Tokenuser.User", Field, 0, ""},
+		{"TranslateAccountName", Func, 0, ""},
+		{"TranslateName", Func, 0, ""},
+		{"TransmitFile", Func, 0, ""},
+		{"TransmitFileBuffers", Type, 0, ""},
+		{"TransmitFileBuffers.Head", Field, 0, ""},
+		{"TransmitFileBuffers.HeadLength", Field, 0, ""},
+		{"TransmitFileBuffers.Tail", Field, 0, ""},
+		{"TransmitFileBuffers.TailLength", Field, 0, ""},
+		{"Truncate", Func, 0, "func(path string, length int64) (err error)"},
+		{"UNIX_PATH_MAX", Const, 12, ""},
+		{"USAGE_MATCH_TYPE_AND", Const, 0, ""},
+		{"USAGE_MATCH_TYPE_OR", Const, 0, ""},
+		{"UTF16FromString", Func, 1, ""},
+		{"UTF16PtrFromString", Func, 1, ""},
+		{"UTF16ToString", Func, 0, ""},
+		{"Ucred", Type, 0, ""},
+		{"Ucred.Gid", Field, 0, ""},
+		{"Ucred.Pid", Field, 0, ""},
+		{"Ucred.Uid", Field, 0, ""},
+		{"Umask", Func, 0, "func(mask int) (oldmask int)"},
+		{"Uname", Func, 0, "func(buf *Utsname) (err error)"},
+		{"Undelete", Func, 0, ""},
+		{"UnixCredentials", Func, 0, "func(ucred *Ucred) []byte"},
+		{"UnixRights", Func, 0, "func(fds ...int) []byte"},
+		{"Unlink", Func, 0, "func(path string) error"},
+		{"Unlinkat", Func, 0, "func(dirfd int, path string) error"},
+		{"UnmapViewOfFile", Func, 0, ""},
+		{"Unmount", Func, 0, "func(target string, flags int) (err error)"},
+		{"Unsetenv", Func, 4, "func(key string) error"},
+		{"Unshare", Func, 0, "func(flags int) (err error)"},
+		{"UserInfo10", Type, 0, ""},
+		{"UserInfo10.Comment", Field, 0, ""},
+		{"UserInfo10.FullName", Field, 0, ""},
+		{"UserInfo10.Name", Field, 0, ""},
+		{"UserInfo10.UsrComment", Field, 0, ""},
+		{"Ustat", Func, 0, "func(dev int, ubuf *Ustat_t) (err error)"},
+		{"Ustat_t", Type, 0, ""},
+		{"Ustat_t.Fname", Field, 0, ""},
+		{"Ustat_t.Fpack", Field, 0, ""},
+		{"Ustat_t.Pad_cgo_0", Field, 0, ""},
+		{"Ustat_t.Pad_cgo_1", Field, 0, ""},
+		{"Ustat_t.Tfree", Field, 0, ""},
+		{"Ustat_t.Tinode", Field, 0, ""},
+		{"Utimbuf", Type, 0, ""},
+		{"Utimbuf.Actime", Field, 0, ""},
+		{"Utimbuf.Modtime", Field, 0, ""},
+		{"Utime", Func, 0, "func(path string, buf *Utimbuf) (err error)"},
+		{"Utimes", Func, 0, "func(path string, tv []Timeval) (err error)"},
+		{"UtimesNano", Func, 1, "func(path string, ts []Timespec) (err error)"},
+		{"Utsname", Type, 0, ""},
+		{"Utsname.Domainname", Field, 0, ""},
+		{"Utsname.Machine", Field, 0, ""},
+		{"Utsname.Nodename", Field, 0, ""},
+		{"Utsname.Release", Field, 0, ""},
+		{"Utsname.Sysname", Field, 0, ""},
+		{"Utsname.Version", Field, 0, ""},
+		{"VDISCARD", Const, 0, ""},
+		{"VDSUSP", Const, 1, ""},
+		{"VEOF", Const, 0, ""},
+		{"VEOL", Const, 0, ""},
+		{"VEOL2", Const, 0, ""},
+		{"VERASE", Const, 0, ""},
+		{"VERASE2", Const, 1, ""},
+		{"VINTR", Const, 0, ""},
+		{"VKILL", Const, 0, ""},
+		{"VLNEXT", Const, 0, ""},
+		{"VMIN", Const, 0, ""},
+		{"VQUIT", Const, 0, ""},
+		{"VREPRINT", Const, 0, ""},
+		{"VSTART", Const, 0, ""},
+		{"VSTATUS", Const, 1, ""},
+		{"VSTOP", Const, 0, ""},
+		{"VSUSP", Const, 0, ""},
+		{"VSWTC", Const, 0, ""},
+		{"VT0", Const, 1, ""},
+		{"VT1", Const, 1, ""},
+		{"VTDLY", Const, 1, ""},
+		{"VTIME", Const, 0, ""},
+		{"VWERASE", Const, 0, ""},
+		{"VirtualLock", Func, 0, ""},
+		{"VirtualUnlock", Func, 0, ""},
+		{"WAIT_ABANDONED", Const, 0, ""},
+		{"WAIT_FAILED", Const, 0, ""},
+		{"WAIT_OBJECT_0", Const, 0, ""},
+		{"WAIT_TIMEOUT", Const, 0, ""},
+		{"WALL", Const, 0, ""},
+		{"WALLSIG", Const, 1, ""},
+		{"WALTSIG", Const, 1, ""},
+		{"WCLONE", Const, 0, ""},
+		{"WCONTINUED", Const, 0, ""},
+		{"WCOREFLAG", Const, 0, ""},
+		{"WEXITED", Const, 0, ""},
+		{"WLINUXCLONE", Const, 0, ""},
+		{"WNOHANG", Const, 0, ""},
+		{"WNOTHREAD", Const, 0, ""},
+		{"WNOWAIT", Const, 0, ""},
+		{"WNOZOMBIE", Const, 1, ""},
+		{"WOPTSCHECKED", Const, 1, ""},
+		{"WORDSIZE", Const, 0, ""},
+		{"WSABuf", Type, 0, ""},
+		{"WSABuf.Buf", Field, 0, ""},
+		{"WSABuf.Len", Field, 0, ""},
+		{"WSACleanup", Func, 0, ""},
+		{"WSADESCRIPTION_LEN", Const, 0, ""},
+		{"WSAData", Type, 0, ""},
+		{"WSAData.Description", Field, 0, ""},
+		{"WSAData.HighVersion", Field, 0, ""},
+		{"WSAData.MaxSockets", Field, 0, ""},
+		{"WSAData.MaxUdpDg", Field, 0, ""},
+		{"WSAData.SystemStatus", Field, 0, ""},
+		{"WSAData.VendorInfo", Field, 0, ""},
+		{"WSAData.Version", Field, 0, ""},
+		{"WSAEACCES", Const, 2, ""},
+		{"WSAECONNABORTED", Const, 9, ""},
+		{"WSAECONNRESET", Const, 3, ""},
+		{"WSAENOPROTOOPT", Const, 23, ""},
+		{"WSAEnumProtocols", Func, 2, ""},
+		{"WSAID_CONNECTEX", Var, 1, ""},
+		{"WSAIoctl", Func, 0, ""},
+		{"WSAPROTOCOL_LEN", Const, 2, ""},
+		{"WSAProtocolChain", Type, 2, ""},
+		{"WSAProtocolChain.ChainEntries", Field, 2, ""},
+		{"WSAProtocolChain.ChainLen", Field, 2, ""},
+		{"WSAProtocolInfo", Type, 2, ""},
+		{"WSAProtocolInfo.AddressFamily", Field, 2, ""},
+		{"WSAProtocolInfo.CatalogEntryId", Field, 2, ""},
+		{"WSAProtocolInfo.MaxSockAddr", Field, 2, ""},
+		{"WSAProtocolInfo.MessageSize", Field, 2, ""},
+		{"WSAProtocolInfo.MinSockAddr", Field, 2, ""},
+		{"WSAProtocolInfo.NetworkByteOrder", Field, 2, ""},
+		{"WSAProtocolInfo.Protocol", Field, 2, ""},
+		{"WSAProtocolInfo.ProtocolChain", Field, 2, ""},
+		{"WSAProtocolInfo.ProtocolMaxOffset", Field, 2, ""},
+		{"WSAProtocolInfo.ProtocolName", Field, 2, ""},
+		{"WSAProtocolInfo.ProviderFlags", Field, 2, ""},
+		{"WSAProtocolInfo.ProviderId", Field, 2, ""},
+		{"WSAProtocolInfo.ProviderReserved", Field, 2, ""},
+		{"WSAProtocolInfo.SecurityScheme", Field, 2, ""},
+		{"WSAProtocolInfo.ServiceFlags1", Field, 2, ""},
+		{"WSAProtocolInfo.ServiceFlags2", Field, 2, ""},
+		{"WSAProtocolInfo.ServiceFlags3", Field, 2, ""},
+		{"WSAProtocolInfo.ServiceFlags4", Field, 2, ""},
+		{"WSAProtocolInfo.SocketType", Field, 2, ""},
+		{"WSAProtocolInfo.Version", Field, 2, ""},
+		{"WSARecv", Func, 0, ""},
+		{"WSARecvFrom", Func, 0, ""},
+		{"WSASYS_STATUS_LEN", Const, 0, ""},
+		{"WSASend", Func, 0, ""},
+		{"WSASendTo", Func, 0, ""},
+		{"WSASendto", Func, 0, ""},
+		{"WSAStartup", Func, 0, ""},
+		{"WSTOPPED", Const, 0, ""},
+		{"WTRAPPED", Const, 1, ""},
+		{"WUNTRACED", Const, 0, ""},
+		{"Wait4", Func, 0, "func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)"},
+		{"WaitForSingleObject", Func, 0, ""},
+		{"WaitStatus", Type, 0, ""},
+		{"WaitStatus.ExitCode", Field, 0, ""},
+		{"Win32FileAttributeData", Type, 0, ""},
+		{"Win32FileAttributeData.CreationTime", Field, 0, ""},
+		{"Win32FileAttributeData.FileAttributes", Field, 0, ""},
+		{"Win32FileAttributeData.FileSizeHigh", Field, 0, ""},
+		{"Win32FileAttributeData.FileSizeLow", Field, 0, ""},
+		{"Win32FileAttributeData.LastAccessTime", Field, 0, ""},
+		{"Win32FileAttributeData.LastWriteTime", Field, 0, ""},
+		{"Win32finddata", Type, 0, ""},
+		{"Win32finddata.AlternateFileName", Field, 0, ""},
+		{"Win32finddata.CreationTime", Field, 0, ""},
+		{"Win32finddata.FileAttributes", Field, 0, ""},
+		{"Win32finddata.FileName", Field, 0, ""},
+		{"Win32finddata.FileSizeHigh", Field, 0, ""},
+		{"Win32finddata.FileSizeLow", Field, 0, ""},
+		{"Win32finddata.LastAccessTime", Field, 0, ""},
+		{"Win32finddata.LastWriteTime", Field, 0, ""},
+		{"Win32finddata.Reserved0", Field, 0, ""},
+		{"Win32finddata.Reserved1", Field, 0, ""},
+		{"Write", Func, 0, "func(fd int, p []byte) (n int, err error)"},
+		{"WriteConsole", Func, 1, ""},
+		{"WriteFile", Func, 0, ""},
+		{"X509_ASN_ENCODING", Const, 0, ""},
+		{"XCASE", Const, 0, ""},
+		{"XP1_CONNECTIONLESS", Const, 2, ""},
+		{"XP1_CONNECT_DATA", Const, 2, ""},
+		{"XP1_DISCONNECT_DATA", Const, 2, ""},
+		{"XP1_EXPEDITED_DATA", Const, 2, ""},
+		{"XP1_GRACEFUL_CLOSE", Const, 2, ""},
+		{"XP1_GUARANTEED_DELIVERY", Const, 2, ""},
+		{"XP1_GUARANTEED_ORDER", Const, 2, ""},
+		{"XP1_IFS_HANDLES", Const, 2, ""},
+		{"XP1_MESSAGE_ORIENTED", Const, 2, ""},
+		{"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2, ""},
+		{"XP1_MULTIPOINT_DATA_PLANE", Const, 2, ""},
+		{"XP1_PARTIAL_MESSAGE", Const, 2, ""},
+		{"XP1_PSEUDO_STREAM", Const, 2, ""},
+		{"XP1_QOS_SUPPORTED", Const, 2, ""},
+		{"XP1_SAN_SUPPORT_SDP", Const, 2, ""},
+		{"XP1_SUPPORT_BROADCAST", Const, 2, ""},
+		{"XP1_SUPPORT_MULTIPOINT", Const, 2, ""},
+		{"XP1_UNI_RECV", Const, 2, ""},
+		{"XP1_UNI_SEND", Const, 2, ""},
+	},
+	"syscall/js": {
+		{"CopyBytesToGo", Func, 0, ""},
+		{"CopyBytesToJS", Func, 0, ""},
+		{"Error", Type, 0, ""},
+		{"Func", Type, 0, ""},
+		{"FuncOf", Func, 0, ""},
+		{"Global", Func, 0, ""},
+		{"Null", Func, 0, ""},
+		{"Type", Type, 0, ""},
+		{"TypeBoolean", Const, 0, ""},
+		{"TypeFunction", Const, 0, ""},
+		{"TypeNull", Const, 0, ""},
+		{"TypeNumber", Const, 0, ""},
+		{"TypeObject", Const, 0, ""},
+		{"TypeString", Const, 0, ""},
+		{"TypeSymbol", Const, 0, ""},
+		{"TypeUndefined", Const, 0, ""},
+		{"Undefined", Func, 0, ""},
+		{"Value", Type, 0, ""},
+		{"ValueError", Type, 0, ""},
+		{"ValueOf", Func, 0, ""},
+	},
+	"testing": {
+		{"(*B).ArtifactDir", Method, 26, ""},
+		{"(*B).Attr", Method, 25, ""},
+		{"(*B).Chdir", Method, 24, ""},
+		{"(*B).Cleanup", Method, 14, ""},
+		{"(*B).Context", Method, 24, ""},
+		{"(*B).Elapsed", Method, 20, ""},
+		{"(*B).Error", Method, 0, ""},
+		{"(*B).Errorf", Method, 0, ""},
+		{"(*B).Fail", Method, 0, ""},
+		{"(*B).FailNow", Method, 0, ""},
+		{"(*B).Failed", Method, 0, ""},
+		{"(*B).Fatal", Method, 0, ""},
+		{"(*B).Fatalf", Method, 0, ""},
+		{"(*B).Helper", Method, 9, ""},
+		{"(*B).Log", Method, 0, ""},
+		{"(*B).Logf", Method, 0, ""},
+		{"(*B).Loop", Method, 24, ""},
+		{"(*B).Name", Method, 8, ""},
+		{"(*B).Output", Method, 25, ""},
+		{"(*B).ReportAllocs", Method, 1, ""},
+		{"(*B).ReportMetric", Method, 13, ""},
+		{"(*B).ResetTimer", Method, 0, ""},
+		{"(*B).Run", Method, 7, ""},
+		{"(*B).RunParallel", Method, 3, ""},
+		{"(*B).SetBytes", Method, 0, ""},
+		{"(*B).SetParallelism", Method, 3, ""},
+		{"(*B).Setenv", Method, 17, ""},
+		{"(*B).Skip", Method, 1, ""},
+		{"(*B).SkipNow", Method, 1, ""},
+		{"(*B).Skipf", Method, 1, ""},
+		{"(*B).Skipped", Method, 1, ""},
+		{"(*B).StartTimer", Method, 0, ""},
+		{"(*B).StopTimer", Method, 0, ""},
+		{"(*B).TempDir", Method, 15, ""},
+		{"(*F).Add", Method, 18, ""},
+		{"(*F).ArtifactDir", Method, 26, ""},
+		{"(*F).Attr", Method, 25, ""},
+		{"(*F).Chdir", Method, 24, ""},
+		{"(*F).Cleanup", Method, 18, ""},
+		{"(*F).Context", Method, 24, ""},
+		{"(*F).Error", Method, 18, ""},
+		{"(*F).Errorf", Method, 18, ""},
+		{"(*F).Fail", Method, 18, ""},
+		{"(*F).FailNow", Method, 18, ""},
+		{"(*F).Failed", Method, 18, ""},
+		{"(*F).Fatal", Method, 18, ""},
+		{"(*F).Fatalf", Method, 18, ""},
+		{"(*F).Fuzz", Method, 18, ""},
+		{"(*F).Helper", Method, 18, ""},
+		{"(*F).Log", Method, 18, ""},
+		{"(*F).Logf", Method, 18, ""},
+		{"(*F).Name", Method, 18, ""},
+		{"(*F).Output", Method, 25, ""},
+		{"(*F).Setenv", Method, 18, ""},
+		{"(*F).Skip", Method, 18, ""},
+		{"(*F).SkipNow", Method, 18, ""},
+		{"(*F).Skipf", Method, 18, ""},
+		{"(*F).Skipped", Method, 18, ""},
+		{"(*F).TempDir", Method, 18, ""},
+		{"(*M).Run", Method, 4, ""},
+		{"(*PB).Next", Method, 3, ""},
+		{"(*T).ArtifactDir", Method, 26, ""},
+		{"(*T).Attr", Method, 25, ""},
+		{"(*T).Chdir", Method, 24, ""},
+		{"(*T).Cleanup", Method, 14, ""},
+		{"(*T).Context", Method, 24, ""},
+		{"(*T).Deadline", Method, 15, ""},
+		{"(*T).Error", Method, 0, ""},
+		{"(*T).Errorf", Method, 0, ""},
+		{"(*T).Fail", Method, 0, ""},
+		{"(*T).FailNow", Method, 0, ""},
+		{"(*T).Failed", Method, 0, ""},
+		{"(*T).Fatal", Method, 0, ""},
+		{"(*T).Fatalf", Method, 0, ""},
+		{"(*T).Helper", Method, 9, ""},
+		{"(*T).Log", Method, 0, ""},
+		{"(*T).Logf", Method, 0, ""},
+		{"(*T).Name", Method, 8, ""},
+		{"(*T).Output", Method, 25, ""},
+		{"(*T).Parallel", Method, 0, ""},
+		{"(*T).Run", Method, 7, ""},
+		{"(*T).Setenv", Method, 17, ""},
+		{"(*T).Skip", Method, 1, ""},
+		{"(*T).SkipNow", Method, 1, ""},
+		{"(*T).Skipf", Method, 1, ""},
+		{"(*T).Skipped", Method, 1, ""},
+		{"(*T).TempDir", Method, 15, ""},
+		{"(BenchmarkResult).AllocedBytesPerOp", Method, 1, ""},
+		{"(BenchmarkResult).AllocsPerOp", Method, 1, ""},
+		{"(BenchmarkResult).MemString", Method, 1, ""},
+		{"(BenchmarkResult).NsPerOp", Method, 0, ""},
+		{"(BenchmarkResult).String", Method, 0, ""},
+		{"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"},
+		{"B", Type, 0, ""},
+		{"B.N", Field, 0, ""},
+		{"Benchmark", Func, 0, "func(f func(b *B)) BenchmarkResult"},
+		{"BenchmarkResult", Type, 0, ""},
+		{"BenchmarkResult.Bytes", Field, 0, ""},
+		{"BenchmarkResult.Extra", Field, 13, ""},
+		{"BenchmarkResult.MemAllocs", Field, 1, ""},
+		{"BenchmarkResult.MemBytes", Field, 1, ""},
+		{"BenchmarkResult.N", Field, 0, ""},
+		{"BenchmarkResult.T", Field, 0, ""},
+		{"Cover", Type, 2, ""},
+		{"Cover.Blocks", Field, 2, ""},
+		{"Cover.Counters", Field, 2, ""},
+		{"Cover.CoveredPackages", Field, 2, ""},
+		{"Cover.Mode", Field, 2, ""},
+		{"CoverBlock", Type, 2, ""},
+		{"CoverBlock.Col0", Field, 2, ""},
+		{"CoverBlock.Col1", Field, 2, ""},
+		{"CoverBlock.Line0", Field, 2, ""},
+		{"CoverBlock.Line1", Field, 2, ""},
+		{"CoverBlock.Stmts", Field, 2, ""},
+		{"CoverMode", Func, 8, "func() string"},
+		{"Coverage", Func, 4, "func() float64"},
+		{"F", Type, 18, ""},
+		{"Init", Func, 13, "func()"},
+		{"InternalBenchmark", Type, 0, ""},
+		{"InternalBenchmark.F", Field, 0, ""},
+		{"InternalBenchmark.Name", Field, 0, ""},
+		{"InternalExample", Type, 0, ""},
+		{"InternalExample.F", Field, 0, ""},
+		{"InternalExample.Name", Field, 0, ""},
+		{"InternalExample.Output", Field, 0, ""},
+		{"InternalExample.Unordered", Field, 7, ""},
+		{"InternalFuzzTarget", Type, 18, ""},
+		{"InternalFuzzTarget.Fn", Field, 18, ""},
+		{"InternalFuzzTarget.Name", Field, 18, ""},
+		{"InternalTest", Type, 0, ""},
+		{"InternalTest.F", Field, 0, ""},
+		{"InternalTest.Name", Field, 0, ""},
+		{"M", Type, 4, ""},
+		{"Main", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)"},
+		{"MainStart", Func, 4, "func(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M"},
+		{"PB", Type, 3, ""},
+		{"RegisterCover", Func, 2, "func(c Cover)"},
+		{"RunBenchmarks", Func, 0, "func(matchString func(pat string, str string) (bool, error), benchmarks []InternalBenchmark)"},
+		{"RunExamples", Func, 0, "func(matchString func(pat string, str string) (bool, error), examples []InternalExample) (ok bool)"},
+		{"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"},
+		{"Short", Func, 0, "func() bool"},
+		{"T", Type, 0, ""},
+		{"TB", Type, 2, ""},
+		{"Testing", Func, 21, "func() bool"},
+		{"Verbose", Func, 1, "func() bool"},
+	},
+	"testing/fstest": {
+		{"(MapFS).Glob", Method, 16, ""},
+		{"(MapFS).Lstat", Method, 25, ""},
+		{"(MapFS).Open", Method, 16, ""},
+		{"(MapFS).ReadDir", Method, 16, ""},
+		{"(MapFS).ReadFile", Method, 16, ""},
+		{"(MapFS).ReadLink", Method, 25, ""},
+		{"(MapFS).Stat", Method, 16, ""},
+		{"(MapFS).Sub", Method, 16, ""},
+		{"MapFS", Type, 16, ""},
+		{"MapFile", Type, 16, ""},
+		{"MapFile.Data", Field, 16, ""},
+		{"MapFile.ModTime", Field, 16, ""},
+		{"MapFile.Mode", Field, 16, ""},
+		{"MapFile.Sys", Field, 16, ""},
+		{"TestFS", Func, 16, "func(fsys fs.FS, expected ...string) error"},
+	},
+	"testing/iotest": {
+		{"DataErrReader", Func, 0, "func(r io.Reader) io.Reader"},
+		{"ErrReader", Func, 16, "func(err error) io.Reader"},
+		{"ErrTimeout", Var, 0, ""},
+		{"HalfReader", Func, 0, "func(r io.Reader) io.Reader"},
+		{"NewReadLogger", Func, 0, "func(prefix string, r io.Reader) io.Reader"},
+		{"NewWriteLogger", Func, 0, "func(prefix string, w io.Writer) io.Writer"},
+		{"OneByteReader", Func, 0, "func(r io.Reader) io.Reader"},
+		{"TestReader", Func, 16, "func(r io.Reader, content []byte) error"},
+		{"TimeoutReader", Func, 0, "func(r io.Reader) io.Reader"},
+		{"TruncateWriter", Func, 0, "func(w io.Writer, n int64) io.Writer"},
+	},
+	"testing/quick": {
+		{"(*CheckEqualError).Error", Method, 0, ""},
+		{"(*CheckError).Error", Method, 0, ""},
+		{"(SetupError).Error", Method, 0, ""},
+		{"Check", Func, 0, "func(f any, config *Config) error"},
+		{"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"},
+		{"CheckEqualError", Type, 0, ""},
+		{"CheckEqualError.CheckError", Field, 0, ""},
+		{"CheckEqualError.Out1", Field, 0, ""},
+		{"CheckEqualError.Out2", Field, 0, ""},
+		{"CheckError", Type, 0, ""},
+		{"CheckError.Count", Field, 0, ""},
+		{"CheckError.In", Field, 0, ""},
+		{"Config", Type, 0, ""},
+		{"Config.MaxCount", Field, 0, ""},
+		{"Config.MaxCountScale", Field, 0, ""},
+		{"Config.Rand", Field, 0, ""},
+		{"Config.Values", Field, 0, ""},
+		{"Generator", Type, 0, ""},
+		{"SetupError", Type, 0, ""},
+		{"Value", Func, 0, "func(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool)"},
+	},
+	"testing/slogtest": {
+		{"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"},
+		{"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"},
+	},
+	"testing/synctest": {
+		{"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"},
+		{"Wait", Func, 25, "func()"},
+	},
+	"text/scanner": {
+		{"(*Position).IsValid", Method, 0, ""},
+		{"(*Scanner).Init", Method, 0, ""},
+		{"(*Scanner).IsValid", Method, 0, ""},
+		{"(*Scanner).Next", Method, 0, ""},
+		{"(*Scanner).Peek", Method, 0, ""},
+		{"(*Scanner).Pos", Method, 0, ""},
+		{"(*Scanner).Scan", Method, 0, ""},
+		{"(*Scanner).TokenText", Method, 0, ""},
+		{"(Position).String", Method, 0, ""},
+		{"(Scanner).String", Method, 0, ""},
+		{"Char", Const, 0, ""},
+		{"Comment", Const, 0, ""},
+		{"EOF", Const, 0, ""},
+		{"Float", Const, 0, ""},
+		{"GoTokens", Const, 0, ""},
+		{"GoWhitespace", Const, 0, ""},
+		{"Ident", Const, 0, ""},
+		{"Int", Const, 0, ""},
+		{"Position", Type, 0, ""},
+		{"Position.Column", Field, 0, ""},
+		{"Position.Filename", Field, 0, ""},
+		{"Position.Line", Field, 0, ""},
+		{"Position.Offset", Field, 0, ""},
+		{"RawString", Const, 0, ""},
+		{"ScanChars", Const, 0, ""},
+		{"ScanComments", Const, 0, ""},
+		{"ScanFloats", Const, 0, ""},
+		{"ScanIdents", Const, 0, ""},
+		{"ScanInts", Const, 0, ""},
+		{"ScanRawStrings", Const, 0, ""},
+		{"ScanStrings", Const, 0, ""},
+		{"Scanner", Type, 0, ""},
+		{"Scanner.Error", Field, 0, ""},
+		{"Scanner.ErrorCount", Field, 0, ""},
+		{"Scanner.IsIdentRune", Field, 4, ""},
+		{"Scanner.Mode", Field, 0, ""},
+		{"Scanner.Position", Field, 0, ""},
+		{"Scanner.Whitespace", Field, 0, ""},
+		{"SkipComments", Const, 0, ""},
+		{"String", Const, 0, ""},
+		{"TokenString", Func, 0, "func(tok rune) string"},
+	},
+	"text/tabwriter": {
+		{"(*Writer).Flush", Method, 0, ""},
+		{"(*Writer).Init", Method, 0, ""},
+		{"(*Writer).Write", Method, 0, ""},
+		{"AlignRight", Const, 0, ""},
+		{"Debug", Const, 0, ""},
+		{"DiscardEmptyColumns", Const, 0, ""},
+		{"Escape", Const, 0, ""},
+		{"FilterHTML", Const, 0, ""},
+		{"NewWriter", Func, 0, "func(output io.Writer, minwidth int, tabwidth int, padding int, padchar byte, flags uint) *Writer"},
+		{"StripEscape", Const, 0, ""},
+		{"TabIndent", Const, 0, ""},
+		{"Writer", Type, 0, ""},
+	},
+	"text/template": {
+		{"(*Template).AddParseTree", Method, 0, ""},
+		{"(*Template).Clone", Method, 0, ""},
+		{"(*Template).DefinedTemplates", Method, 5, ""},
+		{"(*Template).Delims", Method, 0, ""},
+		{"(*Template).Execute", Method, 0, ""},
+		{"(*Template).ExecuteTemplate", Method, 0, ""},
+		{"(*Template).Funcs", Method, 0, ""},
+		{"(*Template).Lookup", Method, 0, ""},
+		{"(*Template).Name", Method, 0, ""},
+		{"(*Template).New", Method, 0, ""},
+		{"(*Template).Option", Method, 5, ""},
+		{"(*Template).Parse", Method, 0, ""},
+		{"(*Template).ParseFS", Method, 16, ""},
+		{"(*Template).ParseFiles", Method, 0, ""},
+		{"(*Template).ParseGlob", Method, 0, ""},
+		{"(*Template).Templates", Method, 0, ""},
+		{"(ExecError).Error", Method, 6, ""},
+		{"(ExecError).Unwrap", Method, 13, ""},
+		{"(Template).Copy", Method, 2, ""},
+		{"(Template).ErrorContext", Method, 1, ""},
+		{"ExecError", Type, 6, ""},
+		{"ExecError.Err", Field, 6, ""},
+		{"ExecError.Name", Field, 6, ""},
+		{"FuncMap", Type, 0, ""},
+		{"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
+		{"HTMLEscapeString", Func, 0, "func(s string) string"},
+		{"HTMLEscaper", Func, 0, "func(args ...any) string"},
+		{"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
+		{"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
+		{"JSEscapeString", Func, 0, "func(s string) string"},
+		{"JSEscaper", Func, 0, "func(args ...any) string"},
+		{"Must", Func, 0, "func(t *Template, err error) *Template"},
+		{"New", Func, 0, "func(name string) *Template"},
+		{"ParseFS", Func, 16, "func(fsys fs.FS, patterns ...string) (*Template, error)"},
+		{"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
+		{"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
+		{"Template", Type, 0, ""},
+		{"Template.Tree", Field, 0, ""},
+		{"URLQueryEscaper", Func, 0, "func(args ...any) string"},
+	},
+	"text/template/parse": {
+		{"(*ActionNode).Copy", Method, 0, ""},
+		{"(*ActionNode).String", Method, 0, ""},
+		{"(*BoolNode).Copy", Method, 0, ""},
+		{"(*BoolNode).String", Method, 0, ""},
+		{"(*BranchNode).Copy", Method, 4, ""},
+		{"(*BranchNode).String", Method, 0, ""},
+		{"(*BreakNode).Copy", Method, 18, ""},
+		{"(*BreakNode).String", Method, 18, ""},
+		{"(*ChainNode).Add", Method, 1, ""},
+		{"(*ChainNode).Copy", Method, 1, ""},
+		{"(*ChainNode).String", Method, 1, ""},
+		{"(*CommandNode).Copy", Method, 0, ""},
+		{"(*CommandNode).String", Method, 0, ""},
+		{"(*CommentNode).Copy", Method, 16, ""},
+		{"(*CommentNode).String", Method, 16, ""},
+		{"(*ContinueNode).Copy", Method, 18, ""},
+		{"(*ContinueNode).String", Method, 18, ""},
+		{"(*DotNode).Copy", Method, 0, ""},
+		{"(*DotNode).String", Method, 0, ""},
+		{"(*DotNode).Type", Method, 0, ""},
+		{"(*FieldNode).Copy", Method, 0, ""},
+		{"(*FieldNode).String", Method, 0, ""},
+		{"(*IdentifierNode).Copy", Method, 0, ""},
+		{"(*IdentifierNode).SetPos", Method, 1, ""},
+		{"(*IdentifierNode).SetTree", Method, 4, ""},
+		{"(*IdentifierNode).String", Method, 0, ""},
+		{"(*IfNode).Copy", Method, 0, ""},
+		{"(*IfNode).String", Method, 0, ""},
+		{"(*ListNode).Copy", Method, 0, ""},
+		{"(*ListNode).CopyList", Method, 0, ""},
+		{"(*ListNode).String", Method, 0, ""},
+		{"(*NilNode).Copy", Method, 1, ""},
+		{"(*NilNode).String", Method, 1, ""},
+		{"(*NilNode).Type", Method, 1, ""},
+		{"(*NumberNode).Copy", Method, 0, ""},
+		{"(*NumberNode).String", Method, 0, ""},
+		{"(*PipeNode).Copy", Method, 0, ""},
+		{"(*PipeNode).CopyPipe", Method, 0, ""},
+		{"(*PipeNode).String", Method, 0, ""},
+		{"(*RangeNode).Copy", Method, 0, ""},
+		{"(*RangeNode).String", Method, 0, ""},
+		{"(*StringNode).Copy", Method, 0, ""},
+		{"(*StringNode).String", Method, 0, ""},
+		{"(*TemplateNode).Copy", Method, 0, ""},
+		{"(*TemplateNode).String", Method, 0, ""},
+		{"(*TextNode).Copy", Method, 0, ""},
+		{"(*TextNode).String", Method, 0, ""},
+		{"(*Tree).Copy", Method, 2, ""},
+		{"(*Tree).ErrorContext", Method, 1, ""},
+		{"(*Tree).Parse", Method, 0, ""},
+		{"(*VariableNode).Copy", Method, 0, ""},
+		{"(*VariableNode).String", Method, 0, ""},
+		{"(*WithNode).Copy", Method, 0, ""},
+		{"(*WithNode).String", Method, 0, ""},
+		{"(ActionNode).Position", Method, 1, ""},
+		{"(ActionNode).Type", Method, 0, ""},
+		{"(BoolNode).Position", Method, 1, ""},
+		{"(BoolNode).Type", Method, 0, ""},
+		{"(BranchNode).Position", Method, 1, ""},
+		{"(BranchNode).Type", Method, 0, ""},
+		{"(BreakNode).Position", Method, 18, ""},
+		{"(BreakNode).Type", Method, 18, ""},
+		{"(ChainNode).Position", Method, 1, ""},
+		{"(ChainNode).Type", Method, 1, ""},
+		{"(CommandNode).Position", Method, 1, ""},
+		{"(CommandNode).Type", Method, 0, ""},
+		{"(CommentNode).Position", Method, 16, ""},
+		{"(CommentNode).Type", Method, 16, ""},
+		{"(ContinueNode).Position", Method, 18, ""},
+		{"(ContinueNode).Type", Method, 18, ""},
+		{"(DotNode).Position", Method, 1, ""},
+		{"(FieldNode).Position", Method, 1, ""},
+		{"(FieldNode).Type", Method, 0, ""},
+		{"(IdentifierNode).Position", Method, 1, ""},
+		{"(IdentifierNode).Type", Method, 0, ""},
+		{"(IfNode).Position", Method, 1, ""},
+		{"(IfNode).Type", Method, 0, ""},
+		{"(ListNode).Position", Method, 1, ""},
+		{"(ListNode).Type", Method, 0, ""},
+		{"(NilNode).Position", Method, 1, ""},
+		{"(NodeType).Type", Method, 0, ""},
+		{"(NumberNode).Position", Method, 1, ""},
+		{"(NumberNode).Type", Method, 0, ""},
+		{"(PipeNode).Position", Method, 1, ""},
+		{"(PipeNode).Type", Method, 0, ""},
+		{"(Pos).Position", Method, 1, ""},
+		{"(RangeNode).Position", Method, 1, ""},
+		{"(RangeNode).Type", Method, 0, ""},
+		{"(StringNode).Position", Method, 1, ""},
+		{"(StringNode).Type", Method, 0, ""},
+		{"(TemplateNode).Position", Method, 1, ""},
+		{"(TemplateNode).Type", Method, 0, ""},
+		{"(TextNode).Position", Method, 1, ""},
+		{"(TextNode).Type", Method, 0, ""},
+		{"(VariableNode).Position", Method, 1, ""},
+		{"(VariableNode).Type", Method, 0, ""},
+		{"(WithNode).Position", Method, 1, ""},
+		{"(WithNode).Type", Method, 0, ""},
+		{"ActionNode", Type, 0, ""},
+		{"ActionNode.Line", Field, 0, ""},
+		{"ActionNode.NodeType", Field, 0, ""},
+		{"ActionNode.Pipe", Field, 0, ""},
+		{"ActionNode.Pos", Field, 1, ""},
+		{"BoolNode", Type, 0, ""},
+		{"BoolNode.NodeType", Field, 0, ""},
+		{"BoolNode.Pos", Field, 1, ""},
+		{"BoolNode.True", Field, 0, ""},
+		{"BranchNode", Type, 0, ""},
+		{"BranchNode.ElseList", Field, 0, ""},
+		{"BranchNode.Line", Field, 0, ""},
+		{"BranchNode.List", Field, 0, ""},
+		{"BranchNode.NodeType", Field, 0, ""},
+		{"BranchNode.Pipe", Field, 0, ""},
+		{"BranchNode.Pos", Field, 1, ""},
+		{"BreakNode", Type, 18, ""},
+		{"BreakNode.Line", Field, 18, ""},
+		{"BreakNode.NodeType", Field, 18, ""},
+		{"BreakNode.Pos", Field, 18, ""},
+		{"ChainNode", Type, 1, ""},
+		{"ChainNode.Field", Field, 1, ""},
+		{"ChainNode.Node", Field, 1, ""},
+		{"ChainNode.NodeType", Field, 1, ""},
+		{"ChainNode.Pos", Field, 1, ""},
+		{"CommandNode", Type, 0, ""},
+		{"CommandNode.Args", Field, 0, ""},
+		{"CommandNode.NodeType", Field, 0, ""},
+		{"CommandNode.Pos", Field, 1, ""},
+		{"CommentNode", Type, 16, ""},
+		{"CommentNode.NodeType", Field, 16, ""},
+		{"CommentNode.Pos", Field, 16, ""},
+		{"CommentNode.Text", Field, 16, ""},
+		{"ContinueNode", Type, 18, ""},
+		{"ContinueNode.Line", Field, 18, ""},
+		{"ContinueNode.NodeType", Field, 18, ""},
+		{"ContinueNode.Pos", Field, 18, ""},
+		{"DotNode", Type, 0, ""},
+		{"DotNode.NodeType", Field, 4, ""},
+		{"DotNode.Pos", Field, 1, ""},
+		{"FieldNode", Type, 0, ""},
+		{"FieldNode.Ident", Field, 0, ""},
+		{"FieldNode.NodeType", Field, 0, ""},
+		{"FieldNode.Pos", Field, 1, ""},
+		{"IdentifierNode", Type, 0, ""},
+		{"IdentifierNode.Ident", Field, 0, ""},
+		{"IdentifierNode.NodeType", Field, 0, ""},
+		{"IdentifierNode.Pos", Field, 1, ""},
+		{"IfNode", Type, 0, ""},
+		{"IfNode.BranchNode", Field, 0, ""},
+		{"IsEmptyTree", Func, 0, "func(n Node) bool"},
+		{"ListNode", Type, 0, ""},
+		{"ListNode.NodeType", Field, 0, ""},
+		{"ListNode.Nodes", Field, 0, ""},
+		{"ListNode.Pos", Field, 1, ""},
+		{"Mode", Type, 16, ""},
+		{"New", Func, 0, "func(name string, funcs ...map[string]any) *Tree"},
+		{"NewIdentifier", Func, 0, "func(ident string) *IdentifierNode"},
+		{"NilNode", Type, 1, ""},
+		{"NilNode.NodeType", Field, 4, ""},
+		{"NilNode.Pos", Field, 1, ""},
+		{"Node", Type, 0, ""},
+		{"NodeAction", Const, 0, ""},
+		{"NodeBool", Const, 0, ""},
+		{"NodeBreak", Const, 18, ""},
+		{"NodeChain", Const, 1, ""},
+		{"NodeCommand", Const, 0, ""},
+		{"NodeComment", Const, 16, ""},
+		{"NodeContinue", Const, 18, ""},
+		{"NodeDot", Const, 0, ""},
+		{"NodeField", Const, 0, ""},
+		{"NodeIdentifier", Const, 0, ""},
+		{"NodeIf", Const, 0, ""},
+		{"NodeList", Const, 0, ""},
+		{"NodeNil", Const, 1, ""},
+		{"NodeNumber", Const, 0, ""},
+		{"NodePipe", Const, 0, ""},
+		{"NodeRange", Const, 0, ""},
+		{"NodeString", Const, 0, ""},
+		{"NodeTemplate", Const, 0, ""},
+		{"NodeText", Const, 0, ""},
+		{"NodeType", Type, 0, ""},
+		{"NodeVariable", Const, 0, ""},
+		{"NodeWith", Const, 0, ""},
+		{"NumberNode", Type, 0, ""},
+		{"NumberNode.Complex128", Field, 0, ""},
+		{"NumberNode.Float64", Field, 0, ""},
+		{"NumberNode.Int64", Field, 0, ""},
+		{"NumberNode.IsComplex", Field, 0, ""},
+		{"NumberNode.IsFloat", Field, 0, ""},
+		{"NumberNode.IsInt", Field, 0, ""},
+		{"NumberNode.IsUint", Field, 0, ""},
+		{"NumberNode.NodeType", Field, 0, ""},
+		{"NumberNode.Pos", Field, 1, ""},
+		{"NumberNode.Text", Field, 0, ""},
+		{"NumberNode.Uint64", Field, 0, ""},
+		{"Parse", Func, 0, "func(name string, text string, leftDelim string, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error)"},
+		{"ParseComments", Const, 16, ""},
+		{"PipeNode", Type, 0, ""},
+		{"PipeNode.Cmds", Field, 0, ""},
+		{"PipeNode.Decl", Field, 0, ""},
+		{"PipeNode.IsAssign", Field, 11, ""},
+		{"PipeNode.Line", Field, 0, ""},
+		{"PipeNode.NodeType", Field, 0, ""},
+		{"PipeNode.Pos", Field, 1, ""},
+		{"Pos", Type, 1, ""},
+		{"RangeNode", Type, 0, ""},
+		{"RangeNode.BranchNode", Field, 0, ""},
+		{"SkipFuncCheck", Const, 17, ""},
+		{"StringNode", Type, 0, ""},
+		{"StringNode.NodeType", Field, 0, ""},
+		{"StringNode.Pos", Field, 1, ""},
+		{"StringNode.Quoted", Field, 0, ""},
+		{"StringNode.Text", Field, 0, ""},
+		{"TemplateNode", Type, 0, ""},
+		{"TemplateNode.Line", Field, 0, ""},
+		{"TemplateNode.Name", Field, 0, ""},
+		{"TemplateNode.NodeType", Field, 0, ""},
+		{"TemplateNode.Pipe", Field, 0, ""},
+		{"TemplateNode.Pos", Field, 1, ""},
+		{"TextNode", Type, 0, ""},
+		{"TextNode.NodeType", Field, 0, ""},
+		{"TextNode.Pos", Field, 1, ""},
+		{"TextNode.Text", Field, 0, ""},
+		{"Tree", Type, 0, ""},
+		{"Tree.Mode", Field, 16, ""},
+		{"Tree.Name", Field, 0, ""},
+		{"Tree.ParseName", Field, 1, ""},
+		{"Tree.Root", Field, 0, ""},
+		{"VariableNode", Type, 0, ""},
+		{"VariableNode.Ident", Field, 0, ""},
+		{"VariableNode.NodeType", Field, 0, ""},
+		{"VariableNode.Pos", Field, 1, ""},
+		{"WithNode", Type, 0, ""},
+		{"WithNode.BranchNode", Field, 0, ""},
+	},
+	"time": {
+		{"(*Location).String", Method, 0, ""},
+		{"(*ParseError).Error", Method, 0, ""},
+		{"(*Ticker).Reset", Method, 15, ""},
+		{"(*Ticker).Stop", Method, 0, ""},
+		{"(*Time).GobDecode", Method, 0, ""},
+		{"(*Time).UnmarshalBinary", Method, 2, ""},
+		{"(*Time).UnmarshalJSON", Method, 0, ""},
+		{"(*Time).UnmarshalText", Method, 2, ""},
+		{"(*Timer).Reset", Method, 1, ""},
+		{"(*Timer).Stop", Method, 0, ""},
+		{"(Duration).Abs", Method, 19, ""},
+		{"(Duration).Hours", Method, 0, ""},
+		{"(Duration).Microseconds", Method, 13, ""},
+		{"(Duration).Milliseconds", Method, 13, ""},
+		{"(Duration).Minutes", Method, 0, ""},
+		{"(Duration).Nanoseconds", Method, 0, ""},
+		{"(Duration).Round", Method, 9, ""},
+		{"(Duration).Seconds", Method, 0, ""},
+		{"(Duration).String", Method, 0, ""},
+		{"(Duration).Truncate", Method, 9, ""},
+		{"(Month).String", Method, 0, ""},
+		{"(Time).Add", Method, 0, ""},
+		{"(Time).AddDate", Method, 0, ""},
+		{"(Time).After", Method, 0, ""},
+		{"(Time).AppendBinary", Method, 24, ""},
+		{"(Time).AppendFormat", Method, 5, ""},
+		{"(Time).AppendText", Method, 24, ""},
+		{"(Time).Before", Method, 0, ""},
+		{"(Time).Clock", Method, 0, ""},
+		{"(Time).Compare", Method, 20, ""},
+		{"(Time).Date", Method, 0, ""},
+		{"(Time).Day", Method, 0, ""},
+		{"(Time).Equal", Method, 0, ""},
+		{"(Time).Format", Method, 0, ""},
+		{"(Time).GoString", Method, 17, ""},
+		{"(Time).GobEncode", Method, 0, ""},
+		{"(Time).Hour", Method, 0, ""},
+		{"(Time).ISOWeek", Method, 0, ""},
+		{"(Time).In", Method, 0, ""},
+		{"(Time).IsDST", Method, 17, ""},
+		{"(Time).IsZero", Method, 0, ""},
+		{"(Time).Local", Method, 0, ""},
+		{"(Time).Location", Method, 0, ""},
+		{"(Time).MarshalBinary", Method, 2, ""},
+		{"(Time).MarshalJSON", Method, 0, ""},
+		{"(Time).MarshalText", Method, 2, ""},
+		{"(Time).Minute", Method, 0, ""},
+		{"(Time).Month", Method, 0, ""},
+		{"(Time).Nanosecond", Method, 0, ""},
+		{"(Time).Round", Method, 1, ""},
+		{"(Time).Second", Method, 0, ""},
+		{"(Time).String", Method, 0, ""},
+		{"(Time).Sub", Method, 0, ""},
+		{"(Time).Truncate", Method, 1, ""},
+		{"(Time).UTC", Method, 0, ""},
+		{"(Time).Unix", Method, 0, ""},
+		{"(Time).UnixMicro", Method, 17, ""},
+		{"(Time).UnixMilli", Method, 17, ""},
+		{"(Time).UnixNano", Method, 0, ""},
+		{"(Time).Weekday", Method, 0, ""},
+		{"(Time).Year", Method, 0, ""},
+		{"(Time).YearDay", Method, 1, ""},
+		{"(Time).Zone", Method, 0, ""},
+		{"(Time).ZoneBounds", Method, 19, ""},
+		{"(Weekday).String", Method, 0, ""},
+		{"ANSIC", Const, 0, ""},
+		{"After", Func, 0, "func(d Duration) <-chan Time"},
+		{"AfterFunc", Func, 0, "func(d Duration, f func()) *Timer"},
+		{"April", Const, 0, ""},
+		{"August", Const, 0, ""},
+		{"Date", Func, 0, "func(year int, month Month, day int, hour int, min int, sec int, nsec int, loc *Location) Time"},
+		{"DateOnly", Const, 20, ""},
+		{"DateTime", Const, 20, ""},
+		{"December", Const, 0, ""},
+		{"Duration", Type, 0, ""},
+		{"February", Const, 0, ""},
+		{"FixedZone", Func, 0, "func(name string, offset int) *Location"},
+		{"Friday", Const, 0, ""},
+		{"Hour", Const, 0, ""},
+		{"January", Const, 0, ""},
+		{"July", Const, 0, ""},
+		{"June", Const, 0, ""},
+		{"Kitchen", Const, 0, ""},
+		{"Layout", Const, 17, ""},
+		{"LoadLocation", Func, 0, "func(name string) (*Location, error)"},
+		{"LoadLocationFromTZData", Func, 10, "func(name string, data []byte) (*Location, error)"},
+		{"Local", Var, 0, ""},
+		{"Location", Type, 0, ""},
+		{"March", Const, 0, ""},
+		{"May", Const, 0, ""},
+		{"Microsecond", Const, 0, ""},
+		{"Millisecond", Const, 0, ""},
+		{"Minute", Const, 0, ""},
+		{"Monday", Const, 0, ""},
+		{"Month", Type, 0, ""},
+		{"Nanosecond", Const, 0, ""},
+		{"NewTicker", Func, 0, "func(d Duration) *Ticker"},
+		{"NewTimer", Func, 0, "func(d Duration) *Timer"},
+		{"November", Const, 0, ""},
+		{"Now", Func, 0, "func() Time"},
+		{"October", Const, 0, ""},
+		{"Parse", Func, 0, "func(layout string, value string) (Time, error)"},
+		{"ParseDuration", Func, 0, "func(s string) (Duration, error)"},
+		{"ParseError", Type, 0, ""},
+		{"ParseError.Layout", Field, 0, ""},
+		{"ParseError.LayoutElem", Field, 0, ""},
+		{"ParseError.Message", Field, 0, ""},
+		{"ParseError.Value", Field, 0, ""},
+		{"ParseError.ValueElem", Field, 0, ""},
+		{"ParseInLocation", Func, 1, "func(layout string, value string, loc *Location) (Time, error)"},
+		{"RFC1123", Const, 0, ""},
+		{"RFC1123Z", Const, 0, ""},
+		{"RFC3339", Const, 0, ""},
+		{"RFC3339Nano", Const, 0, ""},
+		{"RFC822", Const, 0, ""},
+		{"RFC822Z", Const, 0, ""},
+		{"RFC850", Const, 0, ""},
+		{"RubyDate", Const, 0, ""},
+		{"Saturday", Const, 0, ""},
+		{"Second", Const, 0, ""},
+		{"September", Const, 0, ""},
+		{"Since", Func, 0, "func(t Time) Duration"},
+		{"Sleep", Func, 0, "func(d Duration)"},
+		{"Stamp", Const, 0, ""},
+		{"StampMicro", Const, 0, ""},
+		{"StampMilli", Const, 0, ""},
+		{"StampNano", Const, 0, ""},
+		{"Sunday", Const, 0, ""},
+		{"Thursday", Const, 0, ""},
+		{"Tick", Func, 0, "func(d Duration) <-chan Time"},
+		{"Ticker", Type, 0, ""},
+		{"Ticker.C", Field, 0, ""},
+		{"Time", Type, 0, ""},
+		{"TimeOnly", Const, 20, ""},
+		{"Timer", Type, 0, ""},
+		{"Timer.C", Field, 0, ""},
+		{"Tuesday", Const, 0, ""},
+		{"UTC", Var, 0, ""},
+		{"Unix", Func, 0, "func(sec int64, nsec int64) Time"},
+		{"UnixDate", Const, 0, ""},
+		{"UnixMicro", Func, 17, "func(usec int64) Time"},
+		{"UnixMilli", Func, 17, "func(msec int64) Time"},
+		{"Until", Func, 8, "func(t Time) Duration"},
+		{"Wednesday", Const, 0, ""},
+		{"Weekday", Type, 0, ""},
+	},
+	"unicode": {
+		{"(SpecialCase).ToLower", Method, 0, ""},
+		{"(SpecialCase).ToTitle", Method, 0, ""},
+		{"(SpecialCase).ToUpper", Method, 0, ""},
+		{"ASCII_Hex_Digit", Var, 0, ""},
+		{"Adlam", Var, 7, ""},
+		{"Ahom", Var, 5, ""},
+		{"Anatolian_Hieroglyphs", Var, 5, ""},
+		{"Arabic", Var, 0, ""},
+		{"Armenian", Var, 0, ""},
+		{"Avestan", Var, 0, ""},
+		{"AzeriCase", Var, 0, ""},
+		{"Balinese", Var, 0, ""},
+		{"Bamum", Var, 0, ""},
+		{"Bassa_Vah", Var, 4, ""},
+		{"Batak", Var, 0, ""},
+		{"Bengali", Var, 0, ""},
+		{"Bhaiksuki", Var, 7, ""},
+		{"Bidi_Control", Var, 0, ""},
+		{"Bopomofo", Var, 0, ""},
+		{"Brahmi", Var, 0, ""},
+		{"Braille", Var, 0, ""},
+		{"Buginese", Var, 0, ""},
+		{"Buhid", Var, 0, ""},
+		{"C", Var, 0, ""},
+		{"Canadian_Aboriginal", Var, 0, ""},
+		{"Carian", Var, 0, ""},
+		{"CaseRange", Type, 0, ""},
+		{"CaseRange.Delta", Field, 0, ""},
+		{"CaseRange.Hi", Field, 0, ""},
+		{"CaseRange.Lo", Field, 0, ""},
+		{"CaseRanges", Var, 0, ""},
+		{"Categories", Var, 0, ""},
+		{"CategoryAliases", Var, 25, ""},
+		{"Caucasian_Albanian", Var, 4, ""},
+		{"Cc", Var, 0, ""},
+		{"Cf", Var, 0, ""},
+		{"Chakma", Var, 1, ""},
+		{"Cham", Var, 0, ""},
+		{"Cherokee", Var, 0, ""},
+		{"Chorasmian", Var, 16, ""},
+		{"Cn", Var, 25, ""},
+		{"Co", Var, 0, ""},
+		{"Common", Var, 0, ""},
+		{"Coptic", Var, 0, ""},
+		{"Cs", Var, 0, ""},
+		{"Cuneiform", Var, 0, ""},
+		{"Cypriot", Var, 0, ""},
+		{"Cypro_Minoan", Var, 21, ""},
+		{"Cyrillic", Var, 0, ""},
+		{"Dash", Var, 0, ""},
+		{"Deprecated", Var, 0, ""},
+		{"Deseret", Var, 0, ""},
+		{"Devanagari", Var, 0, ""},
+		{"Diacritic", Var, 0, ""},
+		{"Digit", Var, 0, ""},
+		{"Dives_Akuru", Var, 16, ""},
+		{"Dogra", Var, 13, ""},
+		{"Duployan", Var, 4, ""},
+		{"Egyptian_Hieroglyphs", Var, 0, ""},
+		{"Elbasan", Var, 4, ""},
+		{"Elymaic", Var, 14, ""},
+		{"Ethiopic", Var, 0, ""},
+		{"Extender", Var, 0, ""},
+		{"FoldCategory", Var, 0, ""},
+		{"FoldScript", Var, 0, ""},
+		{"Georgian", Var, 0, ""},
+		{"Glagolitic", Var, 0, ""},
+		{"Gothic", Var, 0, ""},
+		{"Grantha", Var, 4, ""},
+		{"GraphicRanges", Var, 0, ""},
+		{"Greek", Var, 0, ""},
+		{"Gujarati", Var, 0, ""},
+		{"Gunjala_Gondi", Var, 13, ""},
+		{"Gurmukhi", Var, 0, ""},
+		{"Han", Var, 0, ""},
+		{"Hangul", Var, 0, ""},
+		{"Hanifi_Rohingya", Var, 13, ""},
+		{"Hanunoo", Var, 0, ""},
+		{"Hatran", Var, 5, ""},
+		{"Hebrew", Var, 0, ""},
+		{"Hex_Digit", Var, 0, ""},
+		{"Hiragana", Var, 0, ""},
+		{"Hyphen", Var, 0, ""},
+		{"IDS_Binary_Operator", Var, 0, ""},
+		{"IDS_Trinary_Operator", Var, 0, ""},
+		{"Ideographic", Var, 0, ""},
+		{"Imperial_Aramaic", Var, 0, ""},
+		{"In", Func, 2, "func(r rune, ranges ...*RangeTable) bool"},
+		{"Inherited", Var, 0, ""},
+		{"Inscriptional_Pahlavi", Var, 0, ""},
+		{"Inscriptional_Parthian", Var, 0, ""},
+		{"Is", Func, 0, "func(rangeTab *RangeTable, r rune) bool"},
+		{"IsControl", Func, 0, "func(r rune) bool"},
+		{"IsDigit", Func, 0, "func(r rune) bool"},
+		{"IsGraphic", Func, 0, "func(r rune) bool"},
+		{"IsLetter", Func, 0, "func(r rune) bool"},
+		{"IsLower", Func, 0, "func(r rune) bool"},
+		{"IsMark", Func, 0, "func(r rune) bool"},
+		{"IsNumber", Func, 0, "func(r rune) bool"},
+		{"IsOneOf", Func, 0, "func(ranges []*RangeTable, r rune) bool"},
+		{"IsPrint", Func, 0, "func(r rune) bool"},
+		{"IsPunct", Func, 0, "func(r rune) bool"},
+		{"IsSpace", Func, 0, "func(r rune) bool"},
+		{"IsSymbol", Func, 0, "func(r rune) bool"},
+		{"IsTitle", Func, 0, "func(r rune) bool"},
+		{"IsUpper", Func, 0, "func(r rune) bool"},
+		{"Javanese", Var, 0, ""},
+		{"Join_Control", Var, 0, ""},
+		{"Kaithi", Var, 0, ""},
+		{"Kannada", Var, 0, ""},
+		{"Katakana", Var, 0, ""},
+		{"Kawi", Var, 21, ""},
+		{"Kayah_Li", Var, 0, ""},
+		{"Kharoshthi", Var, 0, ""},
+		{"Khitan_Small_Script", Var, 16, ""},
+		{"Khmer", Var, 0, ""},
+		{"Khojki", Var, 4, ""},
+		{"Khudawadi", Var, 4, ""},
+		{"L", Var, 0, ""},
+		{"LC", Var, 25, ""},
+		{"Lao", Var, 0, ""},
+		{"Latin", Var, 0, ""},
+		{"Lepcha", Var, 0, ""},
+		{"Letter", Var, 0, ""},
+		{"Limbu", Var, 0, ""},
+		{"Linear_A", Var, 4, ""},
+		{"Linear_B", Var, 0, ""},
+		{"Lisu", Var, 0, ""},
+		{"Ll", Var, 0, ""},
+		{"Lm", Var, 0, ""},
+		{"Lo", Var, 0, ""},
+		{"Logical_Order_Exception", Var, 0, ""},
+		{"Lower", Var, 0, ""},
+		{"LowerCase", Const, 0, ""},
+		{"Lt", Var, 0, ""},
+		{"Lu", Var, 0, ""},
+		{"Lycian", Var, 0, ""},
+		{"Lydian", Var, 0, ""},
+		{"M", Var, 0, ""},
+		{"Mahajani", Var, 4, ""},
+		{"Makasar", Var, 13, ""},
+		{"Malayalam", Var, 0, ""},
+		{"Mandaic", Var, 0, ""},
+		{"Manichaean", Var, 4, ""},
+		{"Marchen", Var, 7, ""},
+		{"Mark", Var, 0, ""},
+		{"Masaram_Gondi", Var, 10, ""},
+		{"MaxASCII", Const, 0, ""},
+		{"MaxCase", Const, 0, ""},
+		{"MaxLatin1", Const, 0, ""},
+		{"MaxRune", Const, 0, ""},
+		{"Mc", Var, 0, ""},
+		{"Me", Var, 0, ""},
+		{"Medefaidrin", Var, 13, ""},
+		{"Meetei_Mayek", Var, 0, ""},
+		{"Mende_Kikakui", Var, 4, ""},
+		{"Meroitic_Cursive", Var, 1, ""},
+		{"Meroitic_Hieroglyphs", Var, 1, ""},
+		{"Miao", Var, 1, ""},
+		{"Mn", Var, 0, ""},
+		{"Modi", Var, 4, ""},
+		{"Mongolian", Var, 0, ""},
+		{"Mro", Var, 4, ""},
+		{"Multani", Var, 5, ""},
+		{"Myanmar", Var, 0, ""},
+		{"N", Var, 0, ""},
+		{"Nabataean", Var, 4, ""},
+		{"Nag_Mundari", Var, 21, ""},
+		{"Nandinagari", Var, 14, ""},
+		{"Nd", Var, 0, ""},
+		{"New_Tai_Lue", Var, 0, ""},
+		{"Newa", Var, 7, ""},
+		{"Nko", Var, 0, ""},
+		{"Nl", Var, 0, ""},
+		{"No", Var, 0, ""},
+		{"Noncharacter_Code_Point", Var, 0, ""},
+		{"Number", Var, 0, ""},
+		{"Nushu", Var, 10, ""},
+		{"Nyiakeng_Puachue_Hmong", Var, 14, ""},
+		{"Ogham", Var, 0, ""},
+		{"Ol_Chiki", Var, 0, ""},
+		{"Old_Hungarian", Var, 5, ""},
+		{"Old_Italic", Var, 0, ""},
+		{"Old_North_Arabian", Var, 4, ""},
+		{"Old_Permic", Var, 4, ""},
+		{"Old_Persian", Var, 0, ""},
+		{"Old_Sogdian", Var, 13, ""},
+		{"Old_South_Arabian", Var, 0, ""},
+		{"Old_Turkic", Var, 0, ""},
+		{"Old_Uyghur", Var, 21, ""},
+		{"Oriya", Var, 0, ""},
+		{"Osage", Var, 7, ""},
+		{"Osmanya", Var, 0, ""},
+		{"Other", Var, 0, ""},
+		{"Other_Alphabetic", Var, 0, ""},
+		{"Other_Default_Ignorable_Code_Point", Var, 0, ""},
+		{"Other_Grapheme_Extend", Var, 0, ""},
+		{"Other_ID_Continue", Var, 0, ""},
+		{"Other_ID_Start", Var, 0, ""},
+		{"Other_Lowercase", Var, 0, ""},
+		{"Other_Math", Var, 0, ""},
+		{"Other_Uppercase", Var, 0, ""},
+		{"P", Var, 0, ""},
+		{"Pahawh_Hmong", Var, 4, ""},
+		{"Palmyrene", Var, 4, ""},
+		{"Pattern_Syntax", Var, 0, ""},
+		{"Pattern_White_Space", Var, 0, ""},
+		{"Pau_Cin_Hau", Var, 4, ""},
+		{"Pc", Var, 0, ""},
+		{"Pd", Var, 0, ""},
+		{"Pe", Var, 0, ""},
+		{"Pf", Var, 0, ""},
+		{"Phags_Pa", Var, 0, ""},
+		{"Phoenician", Var, 0, ""},
+		{"Pi", Var, 0, ""},
+		{"Po", Var, 0, ""},
+		{"Prepended_Concatenation_Mark", Var, 7, ""},
+		{"PrintRanges", Var, 0, ""},
+		{"Properties", Var, 0, ""},
+		{"Ps", Var, 0, ""},
+		{"Psalter_Pahlavi", Var, 4, ""},
+		{"Punct", Var, 0, ""},
+		{"Quotation_Mark", Var, 0, ""},
+		{"Radical", Var, 0, ""},
+		{"Range16", Type, 0, ""},
+		{"Range16.Hi", Field, 0, ""},
+		{"Range16.Lo", Field, 0, ""},
+		{"Range16.Stride", Field, 0, ""},
+		{"Range32", Type, 0, ""},
+		{"Range32.Hi", Field, 0, ""},
+		{"Range32.Lo", Field, 0, ""},
+		{"Range32.Stride", Field, 0, ""},
+		{"RangeTable", Type, 0, ""},
+		{"RangeTable.LatinOffset", Field, 1, ""},
+		{"RangeTable.R16", Field, 0, ""},
+		{"RangeTable.R32", Field, 0, ""},
+		{"Regional_Indicator", Var, 10, ""},
+		{"Rejang", Var, 0, ""},
+		{"ReplacementChar", Const, 0, ""},
+		{"Runic", Var, 0, ""},
+		{"S", Var, 0, ""},
+		{"STerm", Var, 0, ""},
+		{"Samaritan", Var, 0, ""},
+		{"Saurashtra", Var, 0, ""},
+		{"Sc", Var, 0, ""},
+		{"Scripts", Var, 0, ""},
+		{"Sentence_Terminal", Var, 7, ""},
+		{"Sharada", Var, 1, ""},
+		{"Shavian", Var, 0, ""},
+		{"Siddham", Var, 4, ""},
+		{"SignWriting", Var, 5, ""},
+		{"SimpleFold", Func, 0, "func(r rune) rune"},
+		{"Sinhala", Var, 0, ""},
+		{"Sk", Var, 0, ""},
+		{"Sm", Var, 0, ""},
+		{"So", Var, 0, ""},
+		{"Soft_Dotted", Var, 0, ""},
+		{"Sogdian", Var, 13, ""},
+		{"Sora_Sompeng", Var, 1, ""},
+		{"Soyombo", Var, 10, ""},
+		{"Space", Var, 0, ""},
+		{"SpecialCase", Type, 0, ""},
+		{"Sundanese", Var, 0, ""},
+		{"Syloti_Nagri", Var, 0, ""},
+		{"Symbol", Var, 0, ""},
+		{"Syriac", Var, 0, ""},
+		{"Tagalog", Var, 0, ""},
+		{"Tagbanwa", Var, 0, ""},
+		{"Tai_Le", Var, 0, ""},
+		{"Tai_Tham", Var, 0, ""},
+		{"Tai_Viet", Var, 0, ""},
+		{"Takri", Var, 1, ""},
+		{"Tamil", Var, 0, ""},
+		{"Tangsa", Var, 21, ""},
+		{"Tangut", Var, 7, ""},
+		{"Telugu", Var, 0, ""},
+		{"Terminal_Punctuation", Var, 0, ""},
+		{"Thaana", Var, 0, ""},
+		{"Thai", Var, 0, ""},
+		{"Tibetan", Var, 0, ""},
+		{"Tifinagh", Var, 0, ""},
+		{"Tirhuta", Var, 4, ""},
+		{"Title", Var, 0, ""},
+		{"TitleCase", Const, 0, ""},
+		{"To", Func, 0, "func(_case int, r rune) rune"},
+		{"ToLower", Func, 0, "func(r rune) rune"},
+		{"ToTitle", Func, 0, "func(r rune) rune"},
+		{"ToUpper", Func, 0, "func(r rune) rune"},
+		{"Toto", Var, 21, ""},
+		{"TurkishCase", Var, 0, ""},
+		{"Ugaritic", Var, 0, ""},
+		{"Unified_Ideograph", Var, 0, ""},
+		{"Upper", Var, 0, ""},
+		{"UpperCase", Const, 0, ""},
+		{"UpperLower", Const, 0, ""},
+		{"Vai", Var, 0, ""},
+		{"Variation_Selector", Var, 0, ""},
+		{"Version", Const, 0, ""},
+		{"Vithkuqi", Var, 21, ""},
+		{"Wancho", Var, 14, ""},
+		{"Warang_Citi", Var, 4, ""},
+		{"White_Space", Var, 0, ""},
+		{"Yezidi", Var, 16, ""},
+		{"Yi", Var, 0, ""},
+		{"Z", Var, 0, ""},
+		{"Zanabazar_Square", Var, 10, ""},
+		{"Zl", Var, 0, ""},
+		{"Zp", Var, 0, ""},
+		{"Zs", Var, 0, ""},
+	},
+	"unicode/utf16": {
+		{"AppendRune", Func, 20, "func(a []uint16, r rune) []uint16"},
+		{"Decode", Func, 0, "func(s []uint16) []rune"},
+		{"DecodeRune", Func, 0, "func(r1 rune, r2 rune) rune"},
+		{"Encode", Func, 0, "func(s []rune) []uint16"},
+		{"EncodeRune", Func, 0, "func(r rune) (r1 rune, r2 rune)"},
+		{"IsSurrogate", Func, 0, "func(r rune) bool"},
+		{"RuneLen", Func, 23, "func(r rune) int"},
+	},
+	"unicode/utf8": {
+		{"AppendRune", Func, 18, "func(p []byte, r rune) []byte"},
+		{"DecodeLastRune", Func, 0, "func(p []byte) (r rune, size int)"},
+		{"DecodeLastRuneInString", Func, 0, "func(s string) (r rune, size int)"},
+		{"DecodeRune", Func, 0, "func(p []byte) (r rune, size int)"},
+		{"DecodeRuneInString", Func, 0, "func(s string) (r rune, size int)"},
+		{"EncodeRune", Func, 0, "func(p []byte, r rune) int"},
+		{"FullRune", Func, 0, "func(p []byte) bool"},
+		{"FullRuneInString", Func, 0, "func(s string) bool"},
+		{"MaxRune", Const, 0, ""},
+		{"RuneCount", Func, 0, "func(p []byte) int"},
+		{"RuneCountInString", Func, 0, "func(s string) (n int)"},
+		{"RuneError", Const, 0, ""},
+		{"RuneLen", Func, 0, "func(r rune) int"},
+		{"RuneSelf", Const, 0, ""},
+		{"RuneStart", Func, 0, "func(b byte) bool"},
+		{"UTFMax", Const, 0, ""},
+		{"Valid", Func, 0, "func(p []byte) bool"},
+		{"ValidRune", Func, 1, "func(r rune) bool"},
+		{"ValidString", Func, 0, "func(s string) bool"},
+	},
+	"unique": {
+		{"(Handle).Value", Method, 23, ""},
+		{"Handle", Type, 23, ""},
+		{"Make", Func, 23, "func[T comparable](value T) Handle[T]"},
+	},
+	"unsafe": {
+		{"Add", Func, 0, ""},
+		{"Alignof", Func, 0, ""},
+		{"Offsetof", Func, 0, ""},
+		{"Pointer", Type, 0, ""},
+		{"Sizeof", Func, 0, ""},
+		{"Slice", Func, 0, ""},
+		{"SliceData", Func, 0, ""},
+		{"String", Func, 0, ""},
+		{"StringData", Func, 0, ""},
+	},
+	"weak": {
+		{"(Pointer).Value", Method, 24, ""},
+		{"Make", Func, 24, "func[T any](ptr *T) Pointer[T]"},
+		{"Pointer", Type, 24, ""},
+	},
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
new file mode 100644
index 00000000..e223e0f3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run generate.go
+
+// Package stdlib provides a table of all exported symbols in the
+// standard library, along with the version at which they first
+// appeared. It also provides the import graph of std packages.
+package stdlib
+
+import (
+	"fmt"
+	"strings"
+)
+
+type Symbol struct {
+	Name    string
+	Kind    Kind
+	Version Version // Go version that first included the symbol
+	// Signature provides the type of a function (defined only for Kind=Func).
+	// Imported types are denoted as pkg.T; pkg is not fully qualified.
+	// TODO(adonovan): use an unambiguous encoding that is parseable.
+	//
+	// Example2:
+	//    func[M ~map[K]V, K comparable, V any](m M) M
+	//    func(fi fs.FileInfo, link string) (*Header, error)
+	Signature string // if Kind == stdlib.Func
+}
+
+// A Kind indicates the kind of a symbol:
+// function, variable, constant, type, and so on.
+type Kind int8
+
+const (
+	Invalid Kind = iota // Example name:
+	Type                // "Buffer"
+	Func                // "Println"
+	Var                 // "EOF"
+	Const               // "Pi"
+	Field               // "Point.X"
+	Method              // "(*Buffer).Grow"
+)
+
+func (kind Kind) String() string {
+	return [...]string{
+		Invalid: "invalid",
+		Type:    "type",
+		Func:    "func",
+		Var:     "var",
+		Const:   "const",
+		Field:   "field",
+		Method:  "method",
+	}[kind]
+}
+
+// A Version represents a version of Go of the form "go1.%d".
+type Version int8
+
+// String returns a version string of the form "go1.23", without allocating.
+func (v Version) String() string { return versions[v] }
+
+var versions [30]string // (increase constant as needed)
+
+func init() {
+	for i := range versions {
+		versions[i] = fmt.Sprintf("go1.%d", i)
+	}
+}
+
+// HasPackage reports whether the specified package path is part of
+// the standard library's public API.
+func HasPackage(path string) bool {
+	_, ok := PackageSymbols[path]
+	return ok
+}
+
+// SplitField splits the field symbol name into type and field
+// components. It must be called only on Field symbols.
+//
+// Example: "File.Package" -> ("File", "Package")
+func (sym *Symbol) SplitField() (typename, name string) {
+	if sym.Kind != Field {
+		panic("not a field")
+	}
+	typename, name, _ = strings.Cut(sym.Name, ".")
+	return
+}
+
+// SplitMethod splits the method symbol name into pointer, receiver,
+// and method components. It must be called only on Method symbols.
+//
+// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow")
+func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) {
+	if sym.Kind != Method {
+		panic("not a method")
+	}
+	recv, name, _ = strings.Cut(sym.Name, ".")
+	recv = recv[len("(") : len(recv)-len(")")]
+	ptr = recv[0] == '*'
+	if ptr {
+		recv = recv[len("*"):]
+	}
+	return
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go
deleted file mode 100644
index b53f1786..00000000
--- a/vendor/golang.org/x/tools/internal/versions/features.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-// This file contains predicates for working with file versions to
-// decide when a tool should consider a language feature enabled.
-
-// GoVersions that features in x/tools can be gated to.
-const (
-	Go1_18 = "go1.18"
-	Go1_19 = "go1.19"
-	Go1_20 = "go1.20"
-	Go1_21 = "go1.21"
-	Go1_22 = "go1.22"
-)
-
-// Future is an invalid unknown Go version sometime in the future.
-// Do not use directly with Compare.
-const Future = ""
-
-// AtLeast reports whether the file version v comes after a Go release.
-//
-// Use this predicate to enable a behavior once a certain Go release
-// has happened (and stays enabled in the future).
-func AtLeast(v, release string) bool {
-	if v == Future {
-		return true // an unknown future version is always after y.
-	}
-	return Compare(Lang(v), Lang(release)) >= 0
-}
-
-// Before reports whether the file version v is strictly before a Go release.
-//
-// Use this predicate to disable a behavior once a certain Go release
-// has happened (and stays enabled in the future).
-func Before(v, release string) bool {
-	if v == Future {
-		return false // an unknown future version happens after y.
-	}
-	return Compare(Lang(v), Lang(release)) < 0
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go
deleted file mode 100644
index bbabcd22..00000000
--- a/vendor/golang.org/x/tools/internal/versions/gover.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This is a fork of internal/gover for use by x/tools until
-// go1.21 and earlier are no longer supported by x/tools.
-
-package versions
-
-import "strings"
-
-// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]]
-// The numbers are the original decimal strings to avoid integer overflows
-// and since there is very little actual math. (Probably overflow doesn't matter in practice,
-// but at the time this code was written, there was an existing test that used
-// go1.99999999999, which does not fit in an int on 32-bit platforms.
-// The "big decimal" representation avoids the problem entirely.)
-type gover struct {
-	major string // decimal
-	minor string // decimal or ""
-	patch string // decimal or ""
-	kind  string // "", "alpha", "beta", "rc"
-	pre   string // decimal or ""
-}
-
-// compare returns -1, 0, or +1 depending on whether
-// x < y, x == y, or x > y, interpreted as toolchain versions.
-// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
-// Malformed versions compare less than well-formed versions and equal to each other.
-// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
-func compare(x, y string) int {
-	vx := parse(x)
-	vy := parse(y)
-
-	if c := cmpInt(vx.major, vy.major); c != 0 {
-		return c
-	}
-	if c := cmpInt(vx.minor, vy.minor); c != 0 {
-		return c
-	}
-	if c := cmpInt(vx.patch, vy.patch); c != 0 {
-		return c
-	}
-	if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
-		return c
-	}
-	if c := cmpInt(vx.pre, vy.pre); c != 0 {
-		return c
-	}
-	return 0
-}
-
-// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
-func lang(x string) string {
-	v := parse(x)
-	if v.minor == "" || v.major == "1" && v.minor == "0" {
-		return v.major
-	}
-	return v.major + "." + v.minor
-}
-
-// isValid reports whether the version x is valid.
-func isValid(x string) bool {
-	return parse(x) != gover{}
-}
-
-// parse parses the Go version string x into a version.
-// It returns the zero version if x is malformed.
-func parse(x string) gover {
-	var v gover
-
-	// Parse major version.
-	var ok bool
-	v.major, x, ok = cutInt(x)
-	if !ok {
-		return gover{}
-	}
-	if x == "" {
-		// Interpret "1" as "1.0.0".
-		v.minor = "0"
-		v.patch = "0"
-		return v
-	}
-
-	// Parse . before minor version.
-	if x[0] != '.' {
-		return gover{}
-	}
-
-	// Parse minor version.
-	v.minor, x, ok = cutInt(x[1:])
-	if !ok {
-		return gover{}
-	}
-	if x == "" {
-		// Patch missing is same as "0" for older versions.
-		// Starting in Go 1.21, patch missing is different from explicit .0.
-		if cmpInt(v.minor, "21") < 0 {
-			v.patch = "0"
-		}
-		return v
-	}
-
-	// Parse patch if present.
-	if x[0] == '.' {
-		v.patch, x, ok = cutInt(x[1:])
-		if !ok || x != "" {
-			// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
-			// Allowing them would be a bit confusing because we already have:
-			//	1.21 < 1.21rc1
-			// But a prerelease of a patch would have the opposite effect:
-			//	1.21.3rc1 < 1.21.3
-			// We've never needed them before, so let's not start now.
-			return gover{}
-		}
-		return v
-	}
-
-	// Parse prerelease.
-	i := 0
-	for i < len(x) && (x[i] < '0' || '9' < x[i]) {
-		if x[i] < 'a' || 'z' < x[i] {
-			return gover{}
-		}
-		i++
-	}
-	if i == 0 {
-		return gover{}
-	}
-	v.kind, x = x[:i], x[i:]
-	if x == "" {
-		return v
-	}
-	v.pre, x, ok = cutInt(x)
-	if !ok || x != "" {
-		return gover{}
-	}
-
-	return v
-}
-
-// cutInt scans the leading decimal number at the start of x to an integer
-// and returns that value and the rest of the string.
-func cutInt(x string) (n, rest string, ok bool) {
-	i := 0
-	for i < len(x) && '0' <= x[i] && x[i] <= '9' {
-		i++
-	}
-	if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
-		return "", "", false
-	}
-	return x[:i], x[i:], true
-}
-
-// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
-// (Copied from golang.org/x/mod/semver's compareInt.)
-func cmpInt(x, y string) int {
-	if x == y {
-		return 0
-	}
-	if len(x) < len(y) {
-		return -1
-	}
-	if len(x) > len(y) {
-		return +1
-	}
-	if x < y {
-		return -1
-	} else {
-		return +1
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
deleted file mode 100644
index 377bf7a5..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-// toolchain is maximum version (<1.22) that the go toolchain used
-// to build the current tool is known to support.
-//
-// When a tool is built with >=1.22, the value of toolchain is unused.
-//
-// x/tools does not support building with go <1.18. So we take this
-// as the minimum possible maximum.
-var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
deleted file mode 100644
index 1a9efa12..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-// +build go1.20
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_20) < 0 {
-		toolchain = Go1_20
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
deleted file mode 100644
index b7ef216d..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-// +build go1.21
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_21) < 0 {
-		toolchain = Go1_21
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
deleted file mode 100644
index 562eef21..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-import (
-	"go/types"
-)
-
-// GoVersion returns the Go version of the type package.
-// It returns zero if no version can be determined.
-func GoVersion(pkg *types.Package) string {
-	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
-	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
-		return pkg.GoVersion()
-	}
-	return ""
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d33..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
-	// available. We use a go version derived from the toolchain used to
-	// compile the tool by default.
-	// This will be <= go1.21. We take this as the maximum version that
-	// this tool can support.
-	//
-	// There are no features currently in x/tools that need to tell fine grained
-	// differences for versions <1.22.
-	return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index e8180632..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersions returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In tools built with Go >= 1.22, the Go version of a file
-	// follow a cascades of sources:
-	// 1) types.Info.FileVersion, which follows the cascade:
-	//   1.a) file version (ast.File.GoVersion),
-	//   1.b) the package version (types.Config.GoVersion), or
-	// 2) is some unknown Future version.
-	//
-	// File versions require a valid package version to be provided to types
-	// in Config.GoVersion. Config.GoVersion is either from the package's module
-	// or the toolchain (go run). This value should be provided by go/packages
-	// or unitchecker.Config.GoVersion.
-	if v := info.FileVersions[file]; IsValid(v) {
-		return v
-	}
-	// Note: we could instead return runtime.Version() [if valid].
-	// This would act as a max version on what a tool can support.
-	return Future
-}
-
-// InitFileVersions initializes info to record Go versions for Go files.
-func InitFileVersions(info *types.Info) {
-	info.FileVersions = make(map[*ast.File]string)
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go
deleted file mode 100644
index 8d1f7453..00000000
--- a/vendor/golang.org/x/tools/internal/versions/versions.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-import (
-	"strings"
-)
-
-// Note: If we use build tags to use go/versions when go >=1.22,
-// we run into go.dev/issue/53737. Under some operations users would see an
-// import of "go/versions" even if they would not compile the file.
-// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
-// For this reason, this library just a clone of go/versions for the moment.
-
-// Lang returns the Go language version for version x.
-// If x is not a valid version, Lang returns the empty string.
-// For example:
-//
-//	Lang("go1.21rc2") = "go1.21"
-//	Lang("go1.21.2") = "go1.21"
-//	Lang("go1.21") = "go1.21"
-//	Lang("go1") = "go1"
-//	Lang("bad") = ""
-//	Lang("1.21") = ""
-func Lang(x string) string {
-	v := lang(stripGo(x))
-	if v == "" {
-		return ""
-	}
-	return x[:2+len(v)] // "go"+v without allocation
-}
-
-// Compare returns -1, 0, or +1 depending on whether
-// x < y, x == y, or x > y, interpreted as Go versions.
-// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
-// Invalid versions, including the empty string, compare less than
-// valid versions and equal to each other.
-// The language version "go1.21" compares less than the
-// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
-// Custom toolchain suffixes are ignored during comparison:
-// "go1.21.0" and "go1.21.0-bigcorp" are equal.
-func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
-
-// IsValid reports whether the version x is valid.
-func IsValid(x string) bool { return isValid(stripGo(x)) }
-
-// stripGo converts from a "go1.21" version to a "1.21" version.
-// If v does not start with "go", stripGo returns the empty string (a known invalid version).
-func stripGo(v string) string {
-	v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
-	if len(v) < 2 || v[:2] != "go" {
-		return ""
-	}
-	return v[2:]
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a8137ddf..1c544411 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -25,7 +25,7 @@ github.com/Nvveen/Gotty
 # github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496
 ## explicit; go 1.12
 github.com/asaskevich/govalidator
-# github.com/brianvoe/gofakeit/v7 v7.1.2
+# github.com/brianvoe/gofakeit/v7 v7.14.1
 ## explicit; go 1.22
 github.com/brianvoe/gofakeit/v7
 github.com/brianvoe/gofakeit/v7/data
@@ -33,11 +33,11 @@ github.com/brianvoe/gofakeit/v7/source
 # github.com/cenkalti/backoff/v4 v4.3.0
 ## explicit; go 1.18
 github.com/cenkalti/backoff/v4
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/containerd/continuity v0.4.3
-## explicit; go 1.19
+# github.com/containerd/continuity v0.4.5
+## explicit; go 1.21
 github.com/containerd/continuity/pathdriver
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
@@ -45,7 +45,7 @@ github.com/davecgh/go-spew/spew
 # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
 ## explicit
 github.com/dgryski/go-rendezvous
-# github.com/docker/cli v26.1.4+incompatible
+# github.com/docker/cli v27.4.1+incompatible
 ## explicit
 github.com/docker/cli/cli/compose/interpolation
 github.com/docker/cli/cli/compose/loader
@@ -53,6 +53,7 @@ github.com/docker/cli/cli/compose/schema
 github.com/docker/cli/cli/compose/template
 github.com/docker/cli/cli/compose/types
 github.com/docker/cli/opts
+github.com/docker/cli/pkg/kvfile
 # github.com/docker/docker v27.1.1+incompatible
 ## explicit
 github.com/docker/docker/api/types/blkiodev
@@ -74,27 +75,24 @@ github.com/docker/go-units
 # github.com/fatih/structs v1.1.0
 ## explicit
 github.com/fatih/structs
-# github.com/fsnotify/fsnotify v1.7.0
-## explicit; go 1.17
+# github.com/fsnotify/fsnotify v1.4.9
+## explicit; go 1.13
 github.com/fsnotify/fsnotify
-# github.com/ghodss/yaml v1.0.0
-## explicit
-github.com/ghodss/yaml
 # github.com/go-gorp/gorp/v3 v3.1.0
 ## explicit; go 1.18
 github.com/go-gorp/gorp/v3
-# github.com/go-openapi/jsonpointer v0.21.0
-## explicit; go 1.20
+# github.com/go-openapi/jsonpointer v0.19.6
+## explicit; go 1.13
 github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.21.0
-## explicit; go 1.20
+# github.com/go-openapi/jsonreference v0.20.2
+## explicit; go 1.13
 github.com/go-openapi/jsonreference
 github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/spec v0.21.0
-## explicit; go 1.20
+# github.com/go-openapi/spec v0.20.9
+## explicit; go 1.13
 github.com/go-openapi/spec
-# github.com/go-openapi/swag v0.23.0
-## explicit; go 1.20
+# github.com/go-openapi/swag v0.22.3
+## explicit; go 1.18
 github.com/go-openapi/swag
 # github.com/go-ozzo/ozzo-validation v3.6.0+incompatible
 ## explicit
@@ -103,20 +101,21 @@ github.com/go-ozzo/ozzo-validation/is
 # github.com/go-ozzo/ozzo-validation/v4 v4.3.0
 ## explicit; go 1.13
 github.com/go-ozzo/ozzo-validation/v4
-# github.com/go-sql-driver/mysql v1.8.1
-## explicit; go 1.18
+# github.com/go-sql-driver/mysql v1.9.3
+## explicit; go 1.21.0
 github.com/go-sql-driver/mysql
+# github.com/go-viper/mapstructure/v2 v2.1.0
+## explicit; go 1.18
+github.com/go-viper/mapstructure/v2
+github.com/go-viper/mapstructure/v2/internal/errors
 # github.com/gogo/protobuf v1.3.2
 ## explicit; go 1.15
 github.com/gogo/protobuf/proto
-# github.com/golang-jwt/jwt v3.2.2+incompatible
-## explicit
-github.com/golang-jwt/jwt
-# github.com/golang-jwt/jwt/v4 v4.5.0
+# github.com/golang-jwt/jwt/v4 v4.5.2
 ## explicit; go 1.16
 github.com/golang-jwt/jwt/v4
-# github.com/golang-jwt/jwt/v5 v5.0.0
-## explicit; go 1.18
+# github.com/golang-jwt/jwt/v5 v5.3.0
+## explicit; go 1.21
 github.com/golang-jwt/jwt/v5
 # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 ## explicit; go 1.13
@@ -124,7 +123,7 @@ github.com/google/shlex
 # github.com/josharian/intern v1.0.0
 ## explicit; go 1.5
 github.com/josharian/intern
-# github.com/kavenegar/kavenegar-go v0.0.0-20221124112814-40341057b5ca
+# github.com/kavenegar/kavenegar-go v0.0.0-20240205151018-77039f51467d
 ## explicit; go 1.14
 github.com/kavenegar/kavenegar-go
 # github.com/knadh/koanf v1.5.0
@@ -135,11 +134,11 @@ github.com/knadh/koanf/parsers/yaml
 github.com/knadh/koanf/providers/env
 github.com/knadh/koanf/providers/file
 github.com/knadh/koanf/providers/structs
-# github.com/labstack/echo-jwt/v4 v4.2.0
-## explicit; go 1.17
+# github.com/labstack/echo-jwt/v4 v4.4.0
+## explicit; go 1.24.0
 github.com/labstack/echo-jwt/v4
-# github.com/labstack/echo/v4 v4.12.0
-## explicit; go 1.18
+# github.com/labstack/echo/v4 v4.15.1
+## explicit; go 1.24.0
 github.com/labstack/echo/v4
 github.com/labstack/echo/v4/middleware
 # github.com/labstack/gommon v0.4.2
@@ -152,8 +151,8 @@ github.com/labstack/gommon/log
 github.com/mailru/easyjson/buffer
 github.com/mailru/easyjson/jlexer
 github.com/mailru/easyjson/jwriter
-# github.com/mattn/go-colorable v0.1.13
-## explicit; go 1.15
+# github.com/mattn/go-colorable v0.1.14
+## explicit; go 1.18
 github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.20
 ## explicit; go 1.15
@@ -170,6 +169,9 @@ github.com/mitchellh/reflectwalk
 # github.com/moby/docker-image-spec v1.3.1
 ## explicit; go 1.18
 github.com/moby/docker-image-spec/specs-go/v1
+# github.com/moby/sys/user v0.3.0
+## explicit; go 1.17
+github.com/moby/sys/user
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
@@ -181,10 +183,10 @@ github.com/opencontainers/go-digest
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.13
-## explicit; go 1.18
+# github.com/opencontainers/runc v1.2.3
+## explicit; go 1.22
 github.com/opencontainers/runc/libcontainer/user
-# github.com/ory/dockertest/v3 v3.11.0
+# github.com/ory/dockertest/v3 v3.12.0
 ## explicit; go 1.22
 github.com/ory/dockertest/v3
 github.com/ory/dockertest/v3/docker
@@ -215,18 +217,26 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/redis/go-redis/v9 v9.4.0
-## explicit; go 1.18
+# github.com/redis/go-redis/v9 v9.18.0
+## explicit; go 1.21
 github.com/redis/go-redis/v9
+github.com/redis/go-redis/v9/auth
 github.com/redis/go-redis/v9/internal
+github.com/redis/go-redis/v9/internal/auth/streaming
 github.com/redis/go-redis/v9/internal/hashtag
 github.com/redis/go-redis/v9/internal/hscan
+github.com/redis/go-redis/v9/internal/interfaces
+github.com/redis/go-redis/v9/internal/maintnotifications/logs
+github.com/redis/go-redis/v9/internal/otel
 github.com/redis/go-redis/v9/internal/pool
 github.com/redis/go-redis/v9/internal/proto
 github.com/redis/go-redis/v9/internal/rand
+github.com/redis/go-redis/v9/internal/routing
 github.com/redis/go-redis/v9/internal/util
-# github.com/rubenv/sql-migrate v1.6.0
-## explicit; go 1.21
+github.com/redis/go-redis/v9/maintnotifications
+github.com/redis/go-redis/v9/push
+# github.com/rubenv/sql-migrate v1.8.1
+## explicit; go 1.24.0
 github.com/rubenv/sql-migrate
 github.com/rubenv/sql-migrate/sqlparse
 # github.com/sirupsen/logrus v1.9.3
@@ -235,21 +245,28 @@ github.com/sirupsen/logrus
 # github.com/stretchr/objx v0.5.2
 ## explicit; go 1.20
 github.com/stretchr/objx
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.11.1
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
 github.com/stretchr/testify/suite
-# github.com/swaggo/echo-swagger v1.4.1
-## explicit; go 1.17
+# github.com/sv-tools/openapi v0.2.1
+## explicit; go 1.18
+github.com/sv-tools/openapi/spec
+# github.com/swaggo/echo-swagger v1.5.2
+## explicit; go 1.23.0
 github.com/swaggo/echo-swagger
 # github.com/swaggo/files/v2 v2.0.0
 ## explicit; go 1.16
 github.com/swaggo/files/v2
-# github.com/swaggo/swag v1.16.3
+# github.com/swaggo/swag v1.16.6
 ## explicit; go 1.18
 github.com/swaggo/swag
+# github.com/swaggo/swag/v2 v2.0.0-rc4
+## explicit; go 1.18
+github.com/swaggo/swag/v2
 # github.com/valyala/bytebufferpool v1.0.0
 ## explicit
 github.com/valyala/bytebufferpool
@@ -265,39 +282,60 @@ github.com/xeipuuv/gojsonreference
 # github.com/xeipuuv/gojsonschema v1.2.0
 ## explicit
 github.com/xeipuuv/gojsonschema
-# golang.org/x/crypto v0.23.0
+# go.uber.org/atomic v1.11.0
 ## explicit; go 1.18
+go.uber.org/atomic
+# golang.org/x/crypto v0.46.0
+## explicit; go 1.24.0
 golang.org/x/crypto/acme
 golang.org/x/crypto/acme/autocert
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blowfish
-# golang.org/x/net v0.25.0
-## explicit; go 1.18
+# golang.org/x/mod v0.30.0
+## explicit; go 1.24.0
+golang.org/x/mod/internal/lazyregexp
+golang.org/x/mod/module
+golang.org/x/mod/semver
+# golang.org/x/net v0.48.0
+## explicit; go 1.24.0
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/h2c
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
-# golang.org/x/sys v0.21.0
-## explicit; go 1.18
+golang.org/x/net/internal/httpcommon
+# golang.org/x/sync v0.19.0
+## explicit; go 1.24.0
+golang.org/x/sync/errgroup
+# golang.org/x/sys v0.39.0
+## explicit; go 1.24.0
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/text v0.15.0
-## explicit; go 1.18
+# golang.org/x/text v0.32.0
+## explicit; go 1.24.0
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.5.0
-## explicit; go 1.18
+# golang.org/x/time v0.14.0
+## explicit; go 1.24.0
 golang.org/x/time/rate
-# golang.org/x/tools v0.21.0
-## explicit; go 1.19
+# golang.org/x/tools v0.39.0
+## explicit; go 1.24.0
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/buildutil
 golang.org/x/tools/go/internal/cgo
 golang.org/x/tools/go/loader
-golang.org/x/tools/internal/versions
+golang.org/x/tools/imports
+golang.org/x/tools/internal/event
+golang.org/x/tools/internal/event/core
+golang.org/x/tools/internal/event/keys
+golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/gocommand
+golang.org/x/tools/internal/gopathwalk
+golang.org/x/tools/internal/imports
+golang.org/x/tools/internal/modindex
+golang.org/x/tools/internal/stdlib
 # gopkg.in/natefinch/lumberjack.v2 v2.2.1
 ## explicit; go 1.13
 gopkg.in/natefinch/lumberjack.v2
@@ -307,3 +345,6 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
+# sigs.k8s.io/yaml v1.3.0
+## explicit; go 1.12
+sigs.k8s.io/yaml
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore
similarity index 88%
rename from vendor/github.com/ghodss/yaml/.gitignore
rename to vendor/sigs.k8s.io/yaml/.gitignore
index e256a31e..2dc92904 100644
--- a/vendor/github.com/ghodss/yaml/.gitignore
+++ b/vendor/sigs.k8s.io/yaml/.gitignore
@@ -6,6 +6,10 @@
 .project
 .settings/**
 
+# Idea files
+.idea/**
+.idea/
+
 # Emacs save files
 *~
 
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
new file mode 100644
index 00000000..54ed8f9c
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+arch: arm64
+dist: focal
+go: 1.15.x
+script:
+  - diff -u <(echo -n) <(gofmt -d *.go)
+  - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
+  - GO111MODULE=on go vet .
+  - GO111MODULE=on go test -v -race ./...
+  - git diff --exit-code
+install:
+  - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
new file mode 100644
index 00000000..de471151
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE
similarity index 100%
rename from vendor/github.com/ghodss/yaml/LICENSE
rename to vendor/sigs.k8s.io/yaml/LICENSE
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
new file mode 100644
index 00000000..325b40b0
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/OWNERS
@@ -0,0 +1,27 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- dims
+- lavalamp
+- smarterclayton
+- deads2k
+- sttts
+- liggitt
+- caesarxuchao
+reviewers:
+- dims
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- gmarek
+- sttts
+- ncdc
+- tallclair
+labels:
+- sig/api-machinery
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
similarity index 86%
rename from vendor/github.com/ghodss/yaml/README.md
rename to vendor/sigs.k8s.io/yaml/README.md
index 0200f75b..e81cc426 100644
--- a/vendor/github.com/ghodss/yaml/README.md
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -1,12 +1,14 @@
 # YAML marshaling and unmarshaling support for Go
 
-[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml)
+
+kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml).
 
 ## Introduction
 
 A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
 
-In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
 
 ## Compatibility
 
@@ -32,13 +34,13 @@ GOOD:
 To install, run:
 
 ```
-$ go get github.com/ghodss/yaml
+$ go get sigs.k8s.io/yaml
 ```
 
 And import using:
 
 ```
-import "github.com/ghodss/yaml"
+import "sigs.k8s.io/yaml"
 ```
 
 Usage is very similar to the JSON library:
@@ -49,7 +51,7 @@ package main
 import (
 	"fmt"
 
-	"github.com/ghodss/yaml"
+	"sigs.k8s.io/yaml"
 )
 
 type Person struct {
@@ -93,7 +95,7 @@ package main
 import (
 	"fmt"
 
-	"github.com/ghodss/yaml"
+	"sigs.k8s.io/yaml"
 )
 
 func main() {
@@ -105,8 +107,8 @@ func main() {
 	}
 	fmt.Println(string(y))
 	/* Output:
-	name: John
 	age: 30
+	name: John
 	*/
 	j2, err := yaml.YAMLToJSON(y)
 	if err != nil {
diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md
new file mode 100644
index 00000000..6b642464
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+The `yaml` Project is released on an as-needed basis. The process is as follows:
+
+1. An issue is proposing a new release with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
new file mode 100644
index 00000000..0648a8eb
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
@@ -0,0 +1,17 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+cjcullen
+jessfraz
+liggitt
+philips
+tallclair
diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
new file mode 100644
index 00000000..0d15c00c
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go
similarity index 99%
rename from vendor/github.com/ghodss/yaml/fields.go
rename to vendor/sigs.k8s.io/yaml/fields.go
index 58600740..235b7f2c 100644
--- a/vendor/github.com/ghodss/yaml/fields.go
+++ b/vendor/sigs.k8s.io/yaml/fields.go
@@ -1,6 +1,7 @@
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+
 package yaml
 
 import (
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
similarity index 66%
rename from vendor/github.com/ghodss/yaml/yaml.go
rename to vendor/sigs.k8s.io/yaml/yaml.go
index 4fb4054a..efbc535d 100644
--- a/vendor/github.com/ghodss/yaml/yaml.go
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -4,13 +4,14 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io"
 	"reflect"
 	"strconv"
 
 	"gopkg.in/yaml.v2"
 )
 
-// Marshals the object into JSON then converts JSON to YAML and returns the
+// Marshal marshals the object into JSON then converts JSON to YAML and returns the
 // YAML.
 func Marshal(o interface{}) ([]byte, error) {
 	j, err := json.Marshal(o)
@@ -26,15 +27,35 @@ func Marshal(o interface{}) ([]byte, error) {
 	return y, nil
 }
 
-// Converts YAML to JSON then uses JSON to unmarshal into an object.
-func Unmarshal(y []byte, o interface{}) error {
+// JSONOpt is a decoding option for decoding from JSON format.
+type JSONOpt func(*json.Decoder) *json.Decoder
+
+// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
+// optionally configuring the behavior of the JSON unmarshal.
+func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
+	return yamlUnmarshal(y, o, false, opts...)
+}
+
+// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
+// into an object, optionally configuring the behavior of the JSON unmarshal.
+func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
+	return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
+}
+
+// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
+// optionally performing the unmarshalling strictly
+func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
 	vo := reflect.ValueOf(o)
-	j, err := yamlToJSON(y, &vo)
+	unmarshalFn := yaml.Unmarshal
+	if strict {
+		unmarshalFn = yaml.UnmarshalStrict
+	}
+	j, err := yamlToJSON(y, &vo, unmarshalFn)
 	if err != nil {
 		return fmt.Errorf("error converting YAML to JSON: %v", err)
 	}
 
-	err = json.Unmarshal(j, o)
+	err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
 	if err != nil {
 		return fmt.Errorf("error unmarshaling JSON: %v", err)
 	}
@@ -42,7 +63,22 @@ func Unmarshal(y []byte, o interface{}) error {
 	return nil
 }
 
-// Convert JSON to YAML.
+// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
+// object, optionally applying decoder options prior to decoding.  We are not
+// using json.Unmarshal directly as we want the chance to pass in non-default
+// options.
+func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
+	d := json.NewDecoder(r)
+	for _, opt := range opts {
+		d = opt(d)
+	}
+	if err := d.Decode(&o); err != nil {
+		return fmt.Errorf("while decoding JSON: %v", err)
+	}
+	return nil
+}
+
+// JSONToYAML Converts JSON to YAML.
 func JSONToYAML(j []byte) ([]byte, error) {
 	// Convert the JSON to an object.
 	var jsonObj interface{}
@@ -60,8 +96,8 @@ func JSONToYAML(j []byte) ([]byte, error) {
 	return yaml.Marshal(jsonObj)
 }
 
-// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
-// this method should be a no-op.
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
+// passing JSON through this method should be a no-op.
 //
 // Things YAML can do that are not supported by JSON:
 // * In YAML you can have binary and null keys in your maps. These are invalid
@@ -70,14 +106,22 @@ func JSONToYAML(j []byte) ([]byte, error) {
 //   use binary data with this library, encode the data as base64 as usual but do
 //   not use the !!binary tag in your YAML. This will ensure the original base64
 //   encoded data makes it all the way through to the JSON.
+//
+// For strict decoding of YAML, use YAMLToJSONStrict.
 func YAMLToJSON(y []byte) ([]byte, error) {
-	return yamlToJSON(y, nil)
+	return yamlToJSON(y, nil, yaml.Unmarshal)
 }
 
-func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
+// returning an error on any duplicate field names.
+func YAMLToJSONStrict(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil, yaml.UnmarshalStrict)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
 	// Convert the YAML to an object.
 	var yamlObj interface{}
-	err := yaml.Unmarshal(y, &yamlObj)
+	err := yamlUnmarshal(y, &yamlObj)
 	if err != nil {
 		return nil, err
 	}
@@ -272,6 +316,65 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
 		}
 		return yamlObj, nil
 	}
-
-	return nil, nil
+}
+
+// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice,
+// without going through a byte representation. A nil or empty map[string]interface{} input is
+// converted to an empty map, i.e. yaml.MapSlice(nil).
+//
+// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice.
+//
+// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml:
+// - float64s are down-casted as far as possible without data-loss to int, int64, uint64.
+// - int64s are down-casted to int if possible without data-loss.
+//
+// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case.
+//
+// string, bool and any other types are unchanged.
+func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice {
+	if len(j) == 0 {
+		return nil
+	}
+	ret := make(yaml.MapSlice, 0, len(j))
+	for k, v := range j {
+		ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)})
+	}
+	return ret
+}
+
+func jsonToYAMLValue(j interface{}) interface{} {
+	switch j := j.(type) {
+	case map[string]interface{}:
+		if j == nil {
+			return interface{}(nil)
+		}
+		return JSONObjectToYAMLObject(j)
+	case []interface{}:
+		if j == nil {
+			return interface{}(nil)
+		}
+		ret := make([]interface{}, len(j))
+		for i := range j {
+			ret[i] = jsonToYAMLValue(j[i])
+		}
+		return ret
+	case float64:
+		// replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
+		if i64 := int64(j); j == float64(i64) {
+			if i := int(i64); i64 == int64(i) {
+				return i
+			}
+			return i64
+		}
+		if ui64 := uint64(j); j == float64(ui64) {
+			return ui64
+		}
+		return j
+	case int64:
+		if i := int(j); j == int64(i) {
+			return i
+		}
+		return j
+	}
+	return j
 }
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
new file mode 100644
index 00000000..ab3e06a2
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go
@@ -0,0 +1,14 @@
+// This file contains changes that are only compatible with go 1.10 and onwards.
+
+// +build go1.10
+
+package yaml
+
+import "encoding/json"
+
+// DisallowUnknownFields configures the JSON decoder to error out if unknown
+// fields come along, instead of dropping them by default.
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
+	d.DisallowUnknownFields()
+	return d
+}

From 380de96b1fb374020fbae14b3304c362a505751f Mon Sep 17 00:00:00 2001
From: mohammadreza javid 
Date: Fri, 20 Mar 2026 13:40:20 +0330
Subject: [PATCH 2/5] bug fix in migration
 1730029129_add_update_benefactor_access.sql

---
 docker-compose.yaml                           |  2 +-
 ...730029129_add_update_benefactor_access.sql | 58 +++++++++----------
 2 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/docker-compose.yaml b/docker-compose.yaml
index 0d75fc25..b5d7f03a 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -19,7 +19,7 @@ services:
 #      - "niki-redis"
 
   niki-mariadb:
-    image: docker.io/bitnami/mariadb:11.1
+    image: bitnami/mariadb:11.1
     container_name: niki-mariadb
     restart: always
     ports:
diff --git a/repository/mysql/migration/1730029129_add_update_benefactor_access.sql b/repository/mysql/migration/1730029129_add_update_benefactor_access.sql
index 9ea7d774..41d468d2 100644
--- a/repository/mysql/migration/1730029129_add_update_benefactor_access.sql
+++ b/repository/mysql/migration/1730029129_add_update_benefactor_access.sql
@@ -1,36 +1,36 @@
 -- +migrate Up
-ALTER TABLE `admin_access_controls` MODIFY COLUMN `permission`
-    enum (
-    'admin-register',
-    'kindboxreq-accept',
-    'kindboxreq-reject',
-    'kindboxreq-getall',
-    'kindboxreq-deliver',
-    'kindboxreq-assign_sender_agent',
-    'admin-getall_agent',
-    'kindboxreq-get_awaiting_delivery',
-    'kindbox-get',
-    'kindboxreq-add',
-    'kindbox-assign_receiver_agent',
-    'kindbox-getall',
-    'kindboxreq-update',
-    'kindboxreq-get',
-    'kindbox-get_awaiting_return',
-    'kindbox-return',
-    'kindbox-enumerate',
-    'kindbox-update',
-    'benefactor-getall',
-    'benefactor-get',
-    'benefactor-update',
-    'benefactor-update-status'
-    ) NOT NULL;
+ALTER TABLE `admin_access_controls`
+    MODIFY COLUMN `permission`
+        enum (
+            'admin-register',
+            'kindboxreq-accept',
+            'kindboxreq-reject',
+            'kindboxreq-getall',
+            'kindboxreq-deliver',
+            'kindboxreq-assign_sender_agent',
+            'admin-getall_agent',
+            'kindboxreq-get_awaiting_delivery',
+            'kindbox-get',
+            'kindboxreq-add',
+            'kindbox-assign_receiver_agent',
+            'kindbox-getall',
+            'kindboxreq-update',
+            'kindboxreq-get',
+            'kindbox-get_awaiting_return',
+            'kindbox-return',
+            'kindbox-enumerate',
+            'kindbox-update',
+            'benefactor-getall',
+            'benefactor-get',
+            'benefactor-update',
+            'benefactor-update-status'
+            ) NOT NULL;
 
 INSERT INTO `admin_access_controls` (`actor_id`, `actor_type`, `permission`)
 VALUES (1, 'role', 'benefactor-update'),
-VALUES (2, 'role', 'benefactor-update'),
-VALUES (1, 'role', 'benefactor-update-status'),
-VALUES (2, 'role', 'benefactor-update-status');
-
+       (2, 'role', 'benefactor-update'),
+       (1, 'role', 'benefactor-update-status'),
+       (2, 'role', 'benefactor-update-status');
 -- +migrate Down
 DELETE
 FROM `admin_access_controls`;
\ No newline at end of file

From 8a1ee39221474422b15e8d99a807ba1e0b520335 Mon Sep 17 00:00:00 2001
From: mohammadreza javid 
Date: Fri, 20 Mar 2026 13:41:35 +0330
Subject: [PATCH 3/5] update gitignore

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index 29059e39..ecfad040 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,5 @@ tmp
 # Logs
 logs/
 mise.log
+
+curl
\ No newline at end of file

From fda7be1d7ae4b93e3538eef09283e67ac5d44126 Mon Sep 17 00:00:00 2001
From: mohammadreza javid 
Date: Fri, 20 Mar 2026 13:51:12 +0330
Subject: [PATCH 4/5] update readme niki project

---
 README.md | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 README.md

diff --git a/README.md b/README.md
new file mode 100644
index 00000000..135eaefc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,70 @@
+# Niki
+
+---
+
+## Prerequisites
+
+- **Go 1.25.3** (Ensure your Go version matches this requirement)
+- **Docker 20.10+** (or higher)
+- **Git**
+
+---
+
+## Installation
+
+### 1. Configure Go Module Mirror
+
+To accelerate dependency downloads, set the Go module mirror to **Megan** (Iranian Go mirror):
+
+```text
+https://megan.ir/hub/go
+```
+
+### 2. Install Dependencies
+
+```bash
+go mod tidy
+go mod vendor
+```
+
+### 3. Configure Environment
+
+Copy the example environment file and customize it:
+
+```bash
+cp .env.example .env
+```
+
+---
+
+## Running the Application
+
+### 1. Start Services
+
+Launch the database and Redis services using Docker Compose:
+
+```bash
+docker compose up -d
+```
+
+> 🌐 *Docker images are sourced from [Arvan Cloud's Docker repositories](https://www.arvancloud.ir/fa/dev/docker). Ensure
+> your environment has access to these repositories.*
+
+### 2. Apply Database Migrations
+
+Initialize the database schema:
+
+```bash
+go run main.go --migrate
+```
+
+### 3. Start the Application
+
+Run the application in development mode:
+
+```bash
+go run main.go
+```
+
+> ✨ **Alternative**: Use the provided `Makefile` for streamlined execution:
+> [Makefile](Makefile)

From 3771aae4d2517143d70a6a741bb90c5a54d71911 Mon Sep 17 00:00:00 2001
From: mohammadreza javid 
Date: Fri, 20 Mar 2026 14:38:06 +0330
Subject: [PATCH 5/5] add structure of benefactorapp to niki project

---
 benefactorapp/app.go                          |  8 +++++++
 benefactorapp/config.go                       | 11 +++++++++
 benefactorapp/delivery/http/handler.go        | 17 ++++++++++++++
 benefactorapp/delivery/http/route.go          |  1 +
 benefactorapp/delivery/http/server.go         | 23 +++++++++++++++++++
 benefactorapp/repository/database/db.go       | 13 +++++++++++
 benefactorapp/service/entity.go               |  1 +
 benefactorapp/service/param.go                |  1 +
 benefactorapp/service/service.go              |  1 +
 benefactorapp/service/validator.go            |  1 +
 deploy/benefactor/development/.env.example    |  0
 deploy/benefactor/development/Dockerfile      |  0
 deploy/benefactor/development/config.yml      |  0
 .../benefactor/development/docker-compose.yml |  0
 14 files changed, 77 insertions(+)
 create mode 100644 benefactorapp/app.go
 create mode 100644 benefactorapp/config.go
 create mode 100644 benefactorapp/delivery/http/handler.go
 create mode 100644 benefactorapp/delivery/http/route.go
 create mode 100644 benefactorapp/delivery/http/server.go
 create mode 100644 benefactorapp/repository/database/db.go
 create mode 100644 benefactorapp/service/entity.go
 create mode 100644 benefactorapp/service/param.go
 create mode 100644 benefactorapp/service/service.go
 create mode 100644 benefactorapp/service/validator.go
 create mode 100644 deploy/benefactor/development/.env.example
 create mode 100644 deploy/benefactor/development/Dockerfile
 create mode 100644 deploy/benefactor/development/config.yml
 create mode 100644 deploy/benefactor/development/docker-compose.yml

diff --git a/benefactorapp/app.go b/benefactorapp/app.go
new file mode 100644
index 00000000..3a01a26f
--- /dev/null
+++ b/benefactorapp/app.go
@@ -0,0 +1,8 @@
+package benefactorapp
+
+import "net/http"
+
+type Application struct {
+	Config     Config
+	HTTPServer *http.Server
+}
diff --git a/benefactorapp/config.go b/benefactorapp/config.go
new file mode 100644
index 00000000..de3d3f88
--- /dev/null
+++ b/benefactorapp/config.go
@@ -0,0 +1,11 @@
+package benefactorapp
+
+type Config struct {
+	// HTTP server config
+
+	// Database config
+
+	// Logger config
+
+	// Service config
+}
diff --git a/benefactorapp/delivery/http/handler.go b/benefactorapp/delivery/http/handler.go
new file mode 100644
index 00000000..e9549c3c
--- /dev/null
+++ b/benefactorapp/delivery/http/handler.go
@@ -0,0 +1,17 @@
+package http
+
+import (
+	"net/http"
+
+	"github.com/labstack/echo/v4"
+)
+
+type Handler struct{}
+
+func NewHandler() *Handler {
+	return &Handler{}
+}
+
+func (h Handler) HealthCheck(c echo.Context) error {
+	return c.JSON(http.StatusOK, map[string]string{"status": "ok"})
+}
diff --git a/benefactorapp/delivery/http/route.go b/benefactorapp/delivery/http/route.go
new file mode 100644
index 00000000..d02cfda6
--- /dev/null
+++ b/benefactorapp/delivery/http/route.go
@@ -0,0 +1 @@
+package http
diff --git a/benefactorapp/delivery/http/server.go b/benefactorapp/delivery/http/server.go
new file mode 100644
index 00000000..c5adfb44
--- /dev/null
+++ b/benefactorapp/delivery/http/server.go
@@ -0,0 +1,23 @@
+package http
+
+import httpserver "git.gocasts.ir/ebhomengo/niki/delivery/http_server"
+
+type Server struct {
+	HTTPServer *httpserver.Server
+	Handler    *Handler
+}
+
+func NewServer(httpserver *httpserver.Server) *Server {
+	return &Server{
+		HTTPServer: httpserver,
+		Handler:    NewHandler(),
+	}
+}
+
+func (s *Server) Serve() {
+
+}
+
+func (s *Server) Stop() {}
+
+func (s *Server) RegisterRoutes() {}
diff --git a/benefactorapp/repository/database/db.go b/benefactorapp/repository/database/db.go
new file mode 100644
index 00000000..5d07c611
--- /dev/null
+++ b/benefactorapp/repository/database/db.go
@@ -0,0 +1,13 @@
+package database
+
+import "git.gocasts.ir/ebhomengo/niki/repository/mysql"
+
+type DB struct {
+	conn *mysql.DB
+}
+
+func New(conn *mysql.DB) *DB {
+	return &DB{
+		conn: conn,
+	}
+}
diff --git a/benefactorapp/service/entity.go b/benefactorapp/service/entity.go
new file mode 100644
index 00000000..6d43c336
--- /dev/null
+++ b/benefactorapp/service/entity.go
@@ -0,0 +1 @@
+package service
diff --git a/benefactorapp/service/param.go b/benefactorapp/service/param.go
new file mode 100644
index 00000000..6d43c336
--- /dev/null
+++ b/benefactorapp/service/param.go
@@ -0,0 +1 @@
+package service
diff --git a/benefactorapp/service/service.go b/benefactorapp/service/service.go
new file mode 100644
index 00000000..6d43c336
--- /dev/null
+++ b/benefactorapp/service/service.go
@@ -0,0 +1 @@
+package service
diff --git a/benefactorapp/service/validator.go b/benefactorapp/service/validator.go
new file mode 100644
index 00000000..6d43c336
--- /dev/null
+++ b/benefactorapp/service/validator.go
@@ -0,0 +1 @@
+package service
diff --git a/deploy/benefactor/development/.env.example b/deploy/benefactor/development/.env.example
new file mode 100644
index 00000000..e69de29b
diff --git a/deploy/benefactor/development/Dockerfile b/deploy/benefactor/development/Dockerfile
new file mode 100644
index 00000000..e69de29b
diff --git a/deploy/benefactor/development/config.yml b/deploy/benefactor/development/config.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/deploy/benefactor/development/docker-compose.yml b/deploy/benefactor/development/docker-compose.yml
new file mode 100644
index 00000000..e69de29b